diff --git a/.env.example b/.env.dev.example
similarity index 100%
rename from .env.example
rename to .env.dev.example
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9d122096..e4b913c0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -39,7 +39,7 @@ jobs:
run: |
docker run -v ${PWD}/docs/sources:/hugo/content/docs/oncall/latest -e HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest /bin/bash -c 'make hugo'
- unit-test-backend:
+ unit-test-backend-mysql-rabbitmq:
runs-on: ubuntu-latest
container: python:3.9
env:
@@ -66,11 +66,11 @@ jobs:
pip install -r requirements.txt
./wait_for_test_mysql_start.sh && pytest --ds=settings.ci-test -x
- unit-test-backend-postgresql:
+ unit-test-backend-postgresql-rabbitmq:
runs-on: ubuntu-latest
container: python:3.9
env:
- DB_BACKEND: postgresql
+ DATABASE_TYPE: postgresql
DJANGO_SETTINGS_MODULE: settings.ci-test
SLACK_CLIENT_OAUTH_ID: 1
services:
@@ -98,3 +98,29 @@ jobs:
pip install -r requirements.txt
pytest --ds=settings.ci-test -x
+ unit-test-backend-sqlite-redis:
+ runs-on: ubuntu-latest
+ container: python:3.9
+ env:
+ DATABASE_TYPE: sqlite3
+ BROKER_TYPE: redis
+ REDIS_URI: redis://redis_test:6379
+ DJANGO_SETTINGS_MODULE: settings.ci-test
+ SLACK_CLIENT_OAUTH_ID: 1
+ services:
+ redis_test:
+ image: redis:7.0.5
+ options: >-
+ --health-cmd "redis-cli ping"
+ --health-interval 10s
+ --health-timeout 5s
+ --health-retries 5
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Unit Test Backend
+ run: |
+ apt-get update && apt-get install -y netcat
+ cd engine/
+ pip install -r requirements.txt
+ pytest --ds=settings.ci-test -x
diff --git a/.gitignore b/.gitignore
index cadd75d3..d0748610 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,10 +1,12 @@
# Backend
*/db.sqlite3
+engine/oncall_dev.db
*.pyc
venv
.python-version
.env
.env_hobby
+.env.dev
.vscode
dump.rdb
.idea
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c13f348c..01e27157 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,28 @@
# Change Log
+## v1.0.40 (2022-10-05)
+
+- Improved database and Celery backend support
+- Added script to import PagerDuty users to Grafana
+- Bug fixes
+
+## v1.0.39 (2022-10-03)
+
+- Fix issue in v1.0.38 blocking the creation of schedules and webhooks in the UI
+
+## v1.0.38 (2022-09-30)
+
+- Fix exception handling for adding resolution notes when Slack and OnCall users are out of sync
+- Fix all-day events showing as having gaps in Slack notifications
+- Improve plugin configuration error message readability
+- Add `telegram` key to `permalinks` property in `AlertGroup` public API response schema
+
## v1.0.37 (2022-09-21)
+- Improve API token creation form
+- Fix alert group bulk action bugs
- Add `permalinks` property to `AlertGroup` public API response schema
+- Scheduling system bug fixes
+- Public API bug fixes
## v1.0.36 (2022-09-12)
diff --git a/DEVELOPER.md b/DEVELOPER.md
index da536813..347f7e95 100644
--- a/DEVELOPER.md
+++ b/DEVELOPER.md
@@ -56,15 +56,15 @@ python --version
# Make sure you have latest pip and wheel support
pip install -U pip wheel
-# Copy and check .env file.
-cp .env.example .env
+# Copy and check .env.dev file.
+cp .env.dev.example .env.dev
-# NOTE: if you want to use the PostgreSQL db backend add DB_BACKEND=postgresql to your .env file;
-# currently allowed backend values are `mysql` (default) and `postgresql`
+# NOTE: if you want to use the PostgreSQL db backend add DATABASE_TYPE=postgresql to your .env.dev file;
+# currently allowed backend values are `mysql` (default), `postgresql` and `sqlite3`
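+# For example, to try the SQLite backend (illustrative; the same value is used in the CI workflow):
+# echo "DATABASE_TYPE=sqlite3" >> .env.dev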
-# Apply .env to current terminal.
+# Apply .env.dev to current terminal.
# For PyCharm it's better to use https://plugins.jetbrains.com/plugin/7861-envfile/
-export $(grep -v '^#' .env | xargs -0)
+export $(grep -v '^#' .env.dev | xargs -0)
# Install dependencies.
# Hint: there is a known issue with uwsgi. It's not used in the local dev environment. Feel free to comment it in `engine/requirements.txt`.
@@ -83,7 +83,7 @@ python manage.py createsuperuser
# Http server:
python manage.py runserver 0.0.0.0:8080
-# Worker for background tasks (run it in the parallel terminal, don't forget to export .env there)
+# Worker for background tasks (run it in a parallel terminal, don't forget to export .env.dev there)
python manage.py start_celery
# Additionally you could launch the worker with periodic tasks launcher (99% you don't need this)
@@ -248,7 +248,7 @@ Credentials: admin/admin
### Running tests locally
-In the `engine` directory, with the `.env` vars exported and virtualenv activated
+In the `engine` directory, with the `.env.dev` vars exported and virtualenv activated
```bash
pytest
@@ -265,10 +265,10 @@ pytest -n4
### PyCharm
-1. Create venv and copy .env file
+1. Create venv and copy .env.dev file
```bash
python3.9 -m venv venv
- cp .env.example .env
+ cp .env.dev.example .env.dev
```
2. Open the project in PyCharm
3. Settings → Project OnCall
@@ -279,5 +279,5 @@ pytest -n4
- Set Django project root to /engine
- Set Settings to settings/dev.py
5. Create a new Django Server run configuration to Run/Debug the engine
- - Use a plugin such as EnvFile to load the .env file
+ - Use a plugin such as EnvFile to load the .env.dev file
- Change port from 8000 to 8080
diff --git a/README.md b/README.md
index f6c0e447..51eaa6b1 100644
--- a/README.md
+++ b/README.md
@@ -21,50 +21,55 @@ Developer-friendly incident response with brilliant Slack integration.
We prepared multiple environments: [production](https://grafana.com/docs/grafana-cloud/oncall/open-source/#production-environment), [developer](DEVELOPER.md) and hobby:
1. Download docker-compose.yaml:
+
```bash
curl -fsSL https://raw.githubusercontent.com/grafana/oncall/dev/docker-compose.yml -o docker-compose.yml
```
2. Set variables:
+
```bash
echo "DOMAIN=http://localhost:8080
+COMPOSE_PROFILES=with_grafana # Remove this line if you want to use an existing Grafana
SECRET_KEY=my_random_secret_must_be_more_than_32_characters_long
RABBITMQ_PASSWORD=rabbitmq_secret_pw
-MYSQL_PASSWORD=mysql_secret_pw
-COMPOSE_PROFILES=with_grafana # Remove this line if you want to use existing grafana
-GRAFANA_USER=admin
-GRAFANA_PASSWORD=admin" > .env_hobby
+MYSQL_PASSWORD=mysql_secret_pw" > .env
```
3. Launch services:
+
```bash
-docker-compose --env-file .env_hobby -f docker-compose.yml up -d
+docker-compose up -d
```
4. Issue one-time invite token:
+
```bash
-docker-compose --env-file .env_hobby -f docker-compose.yml run engine python manage.py issue_invite_for_the_frontend --override
+docker-compose run engine python manage.py issue_invite_for_the_frontend --override
```
+**Note**: If you remove the plugin configuration and reconfigure it, you will need to generate a new one-time invite token for your new configuration.
+
5. Go to [OnCall Plugin Configuration](http://localhost:3000/plugins/grafana-oncall-app), using log in credentials as defined above: `admin`/`admin` (or find OnCall plugin in configuration->plugins) and connect OnCall _plugin_ with OnCall _backend_:
+
```
Invite token: ^^^ from the previous step.
OnCall backend URL: http://engine:8080
Grafana Url: http://grafana:3000
```
-6. Enjoy! Check our [OSS docs](https://grafana.com/docs/grafana-cloud/oncall/open-source/) if you want to set up Slack, Telegram, Twilio or SMS/calls through Grafana Cloud.
-
+6. Enjoy! Check our [OSS docs](https://grafana.com/docs/grafana-cloud/oncall/open-source/) if you want to set up Slack, Telegram, Twilio or SMS/calls through Grafana Cloud.
## Update version
+
To update your Grafana OnCall hobby environment:
```shell
-# Update Docker images
-docker-compose --env-file .env_hobby -f docker-compose.yml pull engine celery oncall_db_migration
+# Update Docker image
+docker-compose pull engine
# Re-deploy
-docker-compose --env-file .env_hobby -f docker-compose.yml up -d --remove-orphans
+docker-compose up -d
```
After updating the engine, you'll also need to click the "Update" button on the [plugin version page](http://localhost:3000/plugins/grafana-oncall-app?page=version-history).
@@ -76,14 +81,13 @@ See [Grafana docs](https://grafana.com/docs/grafana/latest/administration/plugin
-
## Stargazers over time
[](https://starchart.cc/grafana/oncall)
-
## Further Reading
-- *Migration from the PagerDuty* - [Migrator](https://github.com/grafana/oncall/tree/dev/tools/pagerduty-migrator)
-- *Documentation* - [Grafana OnCall](https://grafana.com/docs/grafana-cloud/oncall/)
-- *Blog Post* - [Announcing Grafana OnCall, the easiest way to do on-call management](https://grafana.com/blog/2021/11/09/announcing-grafana-oncall/)
-- *Presentation* - [Deep dive into the Grafana, Prometheus, and Alertmanager stack for alerting and on-call management](https://grafana.com/go/observabilitycon/2021/alerting/?pg=blog)
+
+- _Migration from the PagerDuty_ - [Migrator](https://github.com/grafana/oncall/tree/dev/tools/pagerduty-migrator)
+- _Documentation_ - [Grafana OnCall](https://grafana.com/docs/grafana-cloud/oncall/)
+- _Blog Post_ - [Announcing Grafana OnCall, the easiest way to do on-call management](https://grafana.com/blog/2021/11/09/announcing-grafana-oncall/)
+- _Presentation_ - [Deep dive into the Grafana, Prometheus, and Alertmanager stack for alerting and on-call management](https://grafana.com/go/observabilitycon/2021/alerting/?pg=blog)
diff --git a/docker-compose-developer-pg.yml b/docker-compose-developer-pg.yml
index f6f813f2..f42f17e3 100644
--- a/docker-compose-developer-pg.yml
+++ b/docker-compose-developer-pg.yml
@@ -1,52 +1,62 @@
-version: '3.2'
+version: "3.8"
services:
-
postgres:
image: postgres:14.4
- platform: linux/x86_64
- mem_limit: 500m
- cpus: 0.5
restart: always
ports:
- - 5432:5432
+ - "5432:5432"
environment:
POSTGRES_DB: oncall_local_dev
POSTGRES_PASSWORD: empty
- POSTGRES_INITDB_ARGS: '--encoding=UTF-8'
+ POSTGRES_INITDB_ARGS: --encoding=UTF-8
+ deploy:
+ resources:
+ limits:
+ memory: 500m
+ cpus: '0.5'
redis:
image: redis
- mem_limit: 100m
- cpus: 0.1
restart: always
ports:
- - 6379:6379
+ - "6379:6379"
+ deploy:
+ resources:
+ limits:
+ memory: 100m
+ cpus: '0.1'
rabbit:
image: "rabbitmq:3.7.15-management"
- mem_limit: 1000m
- cpus: 0.5
environment:
RABBITMQ_DEFAULT_USER: "rabbitmq"
RABBITMQ_DEFAULT_PASS: "rabbitmq"
RABBITMQ_DEFAULT_VHOST: "/"
+ deploy:
+ resources:
+ limits:
+ memory: 1000m
+ cpus: '0.5'
ports:
- - 15672:15672
- - 5672:5672
+ - "15672:15672"
+ - "5672:5672"
mysql-to-create-grafana-db:
image: mysql:5.7
platform: linux/x86_64
- mem_limit: 500m
- cpus: 0.5
command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
restart: always
ports:
- - 3306:3306
+ - "3306:3306"
environment:
MYSQL_ROOT_PASSWORD: empty
MYSQL_DATABASE: grafana
+ deploy:
+ resources:
+ limits:
+ memory: 500m
+ cpus: '0.5'
healthcheck:
test: [ "CMD", "mysqladmin" ,"ping", "-h", "localhost" ]
timeout: 20s
@@ -55,8 +65,6 @@ services:
grafana:
image: "grafana/grafana:main"
restart: always
- mem_limit: 500m
- cpus: 0.5
environment:
GF_DATABASE_TYPE: mysql
GF_DATABASE_HOST: mysql
@@ -65,10 +73,15 @@ services:
GF_SECURITY_ADMIN_USER: oncall
GF_SECURITY_ADMIN_PASSWORD: oncall
GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: grafana-oncall-app
+ deploy:
+ resources:
+ limits:
+ memory: 500m
+ cpus: '0.5'
volumes:
- ./grafana-plugin:/var/lib/grafana/plugins/grafana-plugin
ports:
- - 3000:3000
+ - "3000:3000"
depends_on:
mysql-to-create-grafana-db:
condition: service_healthy
diff --git a/docker-compose-developer.yml b/docker-compose-developer.yml
index dc2f1179..33ef3fd1 100644
--- a/docker-compose-developer.yml
+++ b/docker-compose-developer.yml
@@ -1,19 +1,21 @@
-version: '3.2'
+version: "3.8"
services:
-
mysql:
image: mysql:5.7
platform: linux/x86_64
- mem_limit: 500m
- cpus: 0.5
command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
restart: always
ports:
- - 3306:3306
+ - "3306:3306"
environment:
MYSQL_ROOT_PASSWORD: empty
MYSQL_DATABASE: oncall_local_dev
+ deploy:
+ resources:
+ limits:
+ memory: 500m
+ cpus: '0.5'
healthcheck:
test: [ "CMD", "mysqladmin" ,"ping", "-h", "localhost" ]
timeout: 20s
@@ -21,23 +23,29 @@ services:
redis:
image: redis
- mem_limit: 100m
- cpus: 0.1
restart: always
ports:
- - 6379:6379
+ - "6379:6379"
+ deploy:
+ resources:
+ limits:
+ memory: 100m
+ cpus: '0.1'
rabbit:
image: "rabbitmq:3.7.15-management"
- mem_limit: 1000m
- cpus: 0.5
environment:
RABBITMQ_DEFAULT_USER: "rabbitmq"
RABBITMQ_DEFAULT_PASS: "rabbitmq"
RABBITMQ_DEFAULT_VHOST: "/"
+ deploy:
+ resources:
+ limits:
+ memory: 1000m
+ cpus: '0.5'
ports:
- - 15672:15672
- - 5672:5672
+ - "15672:15672"
+ - "5672:5672"
mysql-to-create-grafana-db:
image: mysql:5.7
@@ -50,8 +58,6 @@ services:
grafana:
image: "grafana/grafana:main"
restart: always
- mem_limit: 500m
- cpus: 0.5
environment:
GF_DATABASE_TYPE: mysql
GF_DATABASE_HOST: mysql
@@ -60,10 +66,15 @@ services:
GF_SECURITY_ADMIN_USER: oncall
GF_SECURITY_ADMIN_PASSWORD: oncall
GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: grafana-oncall-app
+ deploy:
+ resources:
+ limits:
+ memory: 500m
+ cpus: '0.5'
volumes:
- ./grafana-plugin:/var/lib/grafana/plugins/grafana-plugin
ports:
- - 3000:3000
+ - "3000:3000"
depends_on:
mysql:
condition: service_healthy
diff --git a/docker-compose.yml b/docker-compose.yml
index 9caaac8a..a77f5d25 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,28 +1,36 @@
+version: "3.8"
+
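+# `x-environment` defines a YAML anchor (&oncall-environment) with the environment
+# variables shared by the engine, celery and oncall_db_migration services, which
+# reference it below via `environment: *oncall-environment` instead of repeating the block.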
+x-environment:
+ &oncall-environment
+ BASE_URL: $DOMAIN
+ SECRET_KEY: $SECRET_KEY
+ RABBITMQ_USERNAME: "rabbitmq"
+ RABBITMQ_PASSWORD: $RABBITMQ_PASSWORD
+ RABBITMQ_HOST: "rabbitmq"
+ RABBITMQ_PORT: "5672"
+ RABBITMQ_DEFAULT_VHOST: "/"
+ MYSQL_PASSWORD: $MYSQL_PASSWORD
+ MYSQL_DB_NAME: oncall_hobby
+ MYSQL_USER: ${MYSQL_USER:-root}
+ MYSQL_HOST: ${MYSQL_HOST:-mysql}
+ MYSQL_PORT: 3306
+ REDIS_URI: redis://redis:6379/0
+ DJANGO_SETTINGS_MODULE: settings.hobby
+ CELERY_WORKER_QUEUE: "default,critical,long,slack,telegram,webhook,retry,celery"
+ CELERY_WORKER_CONCURRENCY: "1"
+ CELERY_WORKER_MAX_TASKS_PER_CHILD: "100"
+ CELERY_WORKER_SHUTDOWN_INTERVAL: "65m"
+ CELERY_WORKER_BEAT_ENABLED: "True"
+
services:
engine:
image: grafana/oncall
restart: always
ports:
- - 8080:8080
+ - "8080:8080"
command: >
sh -c "uwsgi --ini uwsgi.ini"
- environment:
- BASE_URL: $DOMAIN
- SECRET_KEY: $SECRET_KEY
- RABBITMQ_USERNAME: "rabbitmq"
- RABBITMQ_PASSWORD: $RABBITMQ_PASSWORD
- RABBITMQ_HOST: "rabbitmq"
- RABBITMQ_PORT: "5672"
- RABBITMQ_DEFAULT_VHOST: "/"
- MYSQL_PASSWORD: $MYSQL_PASSWORD
- MYSQL_DB_NAME: oncall_hobby
- MYSQL_USER: ${MYSQL_USER:-root}
- MYSQL_HOST: ${MYSQL_HOST:-mysql}
- MYSQL_PORT: 3306
- REDIS_URI: redis://redis:6379/0
- DJANGO_SETTINGS_MODULE: settings.hobby
- OSS: "True"
- CELERY_WORKER_QUEUE: "default,critical,long,slack,telegram,webhook,retry,celery"
+ environment: *oncall-environment
depends_on:
mysql:
condition: service_healthy
@@ -37,27 +45,7 @@ services:
image: grafana/oncall
restart: always
command: sh -c "./celery_with_exporter.sh"
- environment:
- BASE_URL: $DOMAIN
- SECRET_KEY: $SECRET_KEY
- RABBITMQ_USERNAME: "rabbitmq"
- RABBITMQ_PASSWORD: $RABBITMQ_PASSWORD
- RABBITMQ_HOST: "rabbitmq"
- RABBITMQ_PORT: "5672"
- RABBITMQ_DEFAULT_VHOST: "/"
- MYSQL_PASSWORD: $MYSQL_PASSWORD
- MYSQL_DB_NAME: oncall_hobby
- MYSQL_USER: ${MYSQL_USER:-root}
- MYSQL_HOST: ${MYSQL_HOST:-mysql}
- MYSQL_PORT: 3306
- REDIS_URI: redis://redis:6379/0
- DJANGO_SETTINGS_MODULE: settings.hobby
- OSS: "True"
- CELERY_WORKER_QUEUE: "default,critical,long,slack,telegram,webhook,retry,celery"
- CELERY_WORKER_CONCURRENCY: "1"
- CELERY_WORKER_MAX_TASKS_PER_CHILD: "100"
- CELERY_WORKER_SHUTDOWN_INTERVAL: "65m"
- CELERY_WORKER_BEAT_ENABLED: "True"
+ environment: *oncall-environment
depends_on:
mysql:
condition: service_healthy
@@ -71,23 +59,7 @@ services:
oncall_db_migration:
image: grafana/oncall
command: python manage.py migrate --noinput
- environment:
- BASE_URL: $DOMAIN
- SECRET_KEY: $SECRET_KEY
- RABBITMQ_USERNAME: "rabbitmq"
- RABBITMQ_PASSWORD: $RABBITMQ_PASSWORD
- RABBITMQ_HOST: "rabbitmq"
- RABBITMQ_PORT: "5672"
- RABBITMQ_DEFAULT_VHOST: "/"
- MYSQL_PASSWORD: $MYSQL_PASSWORD
- MYSQL_DB_NAME: oncall_hobby
- MYSQL_USER: ${MYSQL_USER:-root}
- MYSQL_HOST: ${MYSQL_HOST:-mysql}
- MYSQL_PORT: 3306
- REDIS_URI: redis://redis:6379/0
- DJANGO_SETTINGS_MODULE: settings.hobby
- OSS: "True"
- CELERY_WORKER_QUEUE: "default,critical,long,slack,telegram,webhook,retry,celery"
+ environment: *oncall-environment
depends_on:
mysql:
condition: service_healthy
@@ -97,8 +69,6 @@ services:
mysql:
image: mysql:5.7
platform: linux/x86_64
- mem_limit: 500m
- cpus: 0.5
command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
restart: always
expose:
@@ -108,6 +78,11 @@ services:
environment:
MYSQL_ROOT_PASSWORD: $MYSQL_PASSWORD
MYSQL_DATABASE: oncall_hobby
+ deploy:
+ resources:
+ limits:
+ memory: 500m
+ cpus: '0.5'
healthcheck:
test: "mysql -uroot -p$MYSQL_PASSWORD oncall_hobby -e 'select 1'"
timeout: 20s
@@ -115,24 +90,30 @@ services:
redis:
image: redis
- mem_limit: 100m
- cpus: 0.1
restart: always
expose:
- 6379
+ deploy:
+ resources:
+ limits:
+ memory: 100m
+ cpus: '0.1'
rabbitmq:
image: "rabbitmq:3.7.15-management"
restart: always
hostname: rabbitmq
- mem_limit: 1000m
- cpus: 0.5
volumes:
- rabbitmqdata:/var/lib/rabbitmq
environment:
RABBITMQ_DEFAULT_USER: "rabbitmq"
RABBITMQ_DEFAULT_PASS: $RABBITMQ_PASSWORD
RABBITMQ_DEFAULT_VHOST: "/"
+ deploy:
+ resources:
+ limits:
+ memory: 1000m
+ cpus: '0.5'
healthcheck:
test: rabbitmq-diagnostics -q ping
interval: 30s
@@ -152,19 +133,22 @@ services:
grafana:
image: "grafana/grafana:9.0.0-beta3"
restart: always
- mem_limit: 500m
ports:
- - 3000:3000
- cpus: 0.5
+ - "3000:3000"
environment:
GF_DATABASE_TYPE: mysql
GF_DATABASE_HOST: ${MYSQL_HOST:-mysql}
GF_DATABASE_USER: ${MYSQL_USER:-root}
GF_DATABASE_PASSWORD: ${MYSQL_PASSWORD:?err}
GF_SECURITY_ADMIN_USER: ${GRAFANA_USER:-admin}
- GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:?err}
+ GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: grafana-oncall-app
GF_INSTALL_PLUGINS: grafana-oncall-app
+ deploy:
+ resources:
+ limits:
+ memory: 500m
+ cpus: '0.5'
depends_on:
mysql_to_create_grafana_db:
condition: service_completed_successfully
diff --git a/docs/sources/oncall-api-reference/alertgroups.md b/docs/sources/oncall-api-reference/alertgroups.md
index 8b673b64..8e90f6d3 100644
--- a/docs/sources/oncall-api-reference/alertgroups.md
+++ b/docs/sources/oncall-api-reference/alertgroups.md
@@ -35,7 +35,8 @@ The above command returns JSON structured in the following way:
"acknowledged_at": null,
"title": "Memory above 90% threshold",
"permalinks": {
- "slack": null
+ "slack": "https://ghostbusters.slack.com/archives/C1H9RESGA/p135854651500008",
+ "telegram": "https://t.me/c/5354/1234?thread=1234"
}
}
]
diff --git a/engine/Dockerfile b/engine/Dockerfile
index 4a736620..8a72ef39 100644
--- a/engine/Dockerfile
+++ b/engine/Dockerfile
@@ -9,8 +9,11 @@ RUN pip install -r requirements.txt
COPY ./ ./
-RUN DJANGO_SETTINGS_MODULE=settings.prod_without_db SECRET_KEY="ThEmUsTSecretKEYforBUILDstage123" TELEGRAM_TOKEN="0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX" SLACK_CLIENT_OAUTH_ID=1 python manage.py collectstatic --no-input
-RUN rm db.sqlite3
+# Collect static files and create an SQLite database
+RUN mkdir -p /var/lib/oncall
+RUN DJANGO_SETTINGS_MODULE=settings.prod_without_db DATABASE_TYPE=sqlite3 DATABASE_NAME=/var/lib/oncall/oncall.db SECRET_KEY="ThEmUsTSecretKEYforBUILDstage123" python manage.py collectstatic --no-input
+RUN chown -R 1000:2000 /var/lib/oncall
+
# This is required for prometheus_client to sync between uwsgi workers
RUN mkdir -p /tmp/prometheus_django_metrics;
diff --git a/engine/apps/alerts/incident_appearance/renderers/email_renderer.py b/engine/apps/alerts/incident_appearance/renderers/email_renderer.py
index 5107988b..eb18e190 100644
--- a/engine/apps/alerts/incident_appearance/renderers/email_renderer.py
+++ b/engine/apps/alerts/incident_appearance/renderers/email_renderer.py
@@ -29,7 +29,7 @@ class AlertGroupEmailRenderer(AlertGroupBaseRenderer):
content = render_to_string(
"email_notification.html",
{
- "url": self.alert_group.permalink or self.alert_group.web_link,
+ "url": self.alert_group.slack_permalink or self.alert_group.web_link,
"title": str_or_backup(templated_alert.title, title_fallback),
"message": str_or_backup(templated_alert.message, ""), # not render message it all if smth go wrong
"amixr_team": self.alert_group.channel.organization,
diff --git a/engine/apps/alerts/incident_appearance/renderers/sms_renderer.py b/engine/apps/alerts/incident_appearance/renderers/sms_renderer.py
index c050e4ee..13924771 100644
--- a/engine/apps/alerts/incident_appearance/renderers/sms_renderer.py
+++ b/engine/apps/alerts/incident_appearance/renderers/sms_renderer.py
@@ -18,7 +18,9 @@ class AlertGroupSmsRenderer(AlertGroupBaseRenderer):
def render(self):
templated_alert = self.alert_renderer.templated_alert
title = str_or_backup(templated_alert.title, DEFAULT_BACKUP_TITLE)
- if self.alert_group.channel.organization.slack_team_identity and (permalink := self.alert_group.permalink):
+ if self.alert_group.channel.organization.slack_team_identity and (
+ permalink := self.alert_group.slack_permalink
+ ):
incident_link = permalink
else:
incident_link = self.alert_group.web_link
diff --git a/engine/apps/alerts/models/alert_group.py b/engine/apps/alerts/models/alert_group.py
index 7b4681fd..cb08e8e1 100644
--- a/engine/apps/alerts/models/alert_group.py
+++ b/engine/apps/alerts/models/alert_group.py
@@ -401,15 +401,34 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
raise NotImplementedError
@property
- def permalink(self):
+ def slack_permalink(self):
if self.slack_message is not None:
return self.slack_message.permalink
+ @property
+ def telegram_permalink(self) -> typing.Optional[str]:
+ """
+ This property will attempt to access an attribute, `prefetched_telegram_messages`, representing a list of
+ prefetched telegram messages. If this attribute does not exist, it falls back to performing a query.
+
+ See `apps.public_api.serializers.incidents.IncidentSerializer.PREFETCH_RELATED` as an example.
+ """
+ from apps.telegram.models.message import TelegramMessage
+
+ if hasattr(self, "prefetched_telegram_messages"):
+ return self.prefetched_telegram_messages[0].link if self.prefetched_telegram_messages else None
+
+ main_telegram_message = self.telegram_messages.filter(
+ chat_id__startswith="-", message_type=TelegramMessage.ALERT_GROUP_MESSAGE
+ ).first()
+
+ return main_telegram_message.link if main_telegram_message else None
+
@property
def permalinks(self) -> Permalinks:
- # TODO: refactor 'permalink' property (maybe 'slack_permalink'?) once we add the next permalink
return {
- "slack": self.permalink,
+ "slack": self.slack_permalink,
+ "telegram": self.telegram_permalink,
}
@property
diff --git a/engine/apps/alerts/tasks/notify_ical_schedule_shift.py b/engine/apps/alerts/tasks/notify_ical_schedule_shift.py
index 1a261880..a23f2a4b 100644
--- a/engine/apps/alerts/tasks/notify_ical_schedule_shift.py
+++ b/engine/apps/alerts/tasks/notify_ical_schedule_shift.py
@@ -308,7 +308,7 @@ def notify_ical_schedule_shift(schedule_pk):
new_shifts = sorted(new_shifts, key=lambda shift: shift["start"])
if len(new_shifts) != 0:
- days_to_lookup = (new_shifts[-1]["end"].date() - now.date()).days
+ days_to_lookup = (new_shifts[-1]["end"].date() - now.date()).days + 1
days_to_lookup = max([days_to_lookup, MIN_DAYS_TO_LOOKUP_FOR_THE_END_OF_EVENT])
else:
days_to_lookup = MIN_DAYS_TO_LOOKUP_FOR_THE_END_OF_EVENT
diff --git a/engine/apps/alerts/tests/test_notify_ical_schedule_shift.py b/engine/apps/alerts/tests/test_notify_ical_schedule_shift.py
index d6cb6398..eae19ef3 100644
--- a/engine/apps/alerts/tests/test_notify_ical_schedule_shift.py
+++ b/engine/apps/alerts/tests/test_notify_ical_schedule_shift.py
@@ -1,4 +1,9 @@
+from datetime import datetime
+from unittest.mock import Mock, patch
+
import pytest
+import pytz
+from django.utils import timezone
from apps.alerts.tasks.notify_ical_schedule_shift import notify_ical_schedule_shift
from apps.schedules.models import OnCallScheduleICal
@@ -9,32 +14,35 @@ PRODID:-//Google Inc//Google Calendar 70.9054//EN
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
-X-WR-CALNAME:t
-X-WR-TIMEZONE:Asia/Yekaterinburg
-BEGIN:VTIMEZONE
-TZID:Asia/Yekaterinburg
-X-LIC-LOCATION:Asia/Yekaterinburg
-BEGIN:STANDARD
-TZOFFSETFROM:+0500
-TZOFFSETTO:+0500
-TZNAME:+05
-DTSTART:19700101T000000
-END:STANDARD
-END:VTIMEZONE
BEGIN:VEVENT
-DTSTART;TZID=Asia/Yekaterinburg:20210124T130000
-DTEND;TZID=Asia/Yekaterinburg:20210124T220000
-RRULE:FREQ=DAILY
-DTSTAMP:20210127T143634Z
-UID:0i0af8p6p8vfampe3r1vkog0jg@google.com
-CREATED:20210127T143553Z
+DTSTART;VALUE=DATE:20211005
+DTEND;VALUE=DATE:20211012
+RRULE:FREQ=WEEKLY;WKST=SU;INTERVAL=7;BYDAY=WE
+DTSTAMP:20210930T125523Z
+UID:id1@google.com
+CREATED:20210928T202349Z
DESCRIPTION:
-LAST-MODIFIED:20210127T143553Z
+LAST-MODIFIED:20210929T204751Z
LOCATION:
-SEQUENCE:0
+SEQUENCE:1
STATUS:CONFIRMED
-SUMMARY:@Bernard Desruisseaux
-TRANSP:OPAQUE
+SUMMARY:user1
+TRANSP:TRANSPARENT
+END:VEVENT
+BEGIN:VEVENT
+DTSTART;VALUE=DATE:20210928
+DTEND;VALUE=DATE:20211005
+RRULE:FREQ=WEEKLY;WKST=SU;INTERVAL=7;BYDAY=WE
+DTSTAMP:20210930T125523Z
+UID:id2@google.com
+CREATED:20210928T202331Z
+DESCRIPTION:
+LAST-MODIFIED:20210929T204744Z
+LOCATION:
+SEQUENCE:2
+STATUS:CONFIRMED
+SUMMARY:user2
+TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
"""
@@ -61,3 +69,36 @@ def test_current_overrides_ical_schedule_is_none(
# this should not raise
notify_ical_schedule_shift(ical_schedule.oncallschedule_ptr_id)
+
+
+@pytest.mark.django_db
+def test_next_shift_notification_long_shifts(
+ make_organization_and_user_with_slack_identities,
+ make_schedule,
+ make_user,
+):
+ organization, _, _, _ = make_organization_and_user_with_slack_identities()
+ make_user(organization=organization, username="user1")
+ make_user(organization=organization, username="user2")
+
+ ical_schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleICal,
+ name="test_ical_schedule",
+ channel="channel",
+ ical_url_primary="url",
+ prev_ical_file_primary=ICAL_DATA,
+ cached_ical_file_primary=ICAL_DATA,
+ prev_ical_file_overrides=None,
+ cached_ical_file_overrides=None,
+ )
+
+ with patch.object(timezone, "datetime", Mock(wraps=timezone.datetime)) as mock_tz_datetime:
+ mock_tz_datetime.now.return_value = datetime(2021, 9, 29, 12, 0, tzinfo=pytz.UTC)
+ with patch("apps.slack.slack_client.SlackClientWithErrorHandling.api_call") as mock_slack_api_call:
+ notify_ical_schedule_shift(ical_schedule.oncallschedule_ptr_id)
+
+ slack_blocks = mock_slack_api_call.call_args_list[0][1]["blocks"]
+ notification = slack_blocks[0]["text"]["text"]
+ assert "*New on-call shift:*\nuser2" in notification
+ assert "*Next on-call shift:*\nuser1" in notification
diff --git a/engine/apps/alerts/tests/test_utils.py b/engine/apps/alerts/tests/test_utils.py
index ff19018a..7934f64a 100644
--- a/engine/apps/alerts/tests/test_utils.py
+++ b/engine/apps/alerts/tests/test_utils.py
@@ -12,3 +12,11 @@ def test_request_outgoing_webhook_cannot_resolve_name():
success, err = request_outgoing_webhook("http://something.something/webhook", "GET")
assert success is False
assert err == "Cannot resolve name in url"
+
+
+@pytest.mark.django_db
+def test_request_outgoing_webhook_resolve_name_without_port():
+ with patch("apps.alerts.utils.socket.gethostbyname") as mock_gethostbyname:
+ mock_gethostbyname.return_value = "127.0.0.1"
+ request_outgoing_webhook("http://something.something:9000/webhook", "GET")
+ assert mock_gethostbyname.call_args_list[0].args[0] == "something.something"
diff --git a/engine/apps/alerts/utils.py b/engine/apps/alerts/utils.py
index 58ba22ea..86cbc786 100644
--- a/engine/apps/alerts/utils.py
+++ b/engine/apps/alerts/utils.py
@@ -57,7 +57,7 @@ def request_outgoing_webhook(webhook_url, http_request_type, post_kwargs={}) ->
if not live_settings.DANGEROUS_WEBHOOKS_ENABLED:
# Get the ip address of the webhook url and check if it belongs to the private network
try:
- webhook_url_ip_address = socket.gethostbyname(parsed_url.netloc)
+ webhook_url_ip_address = socket.gethostbyname(parsed_url.hostname)
except socket.gaierror:
return False, "Cannot resolve name in url"
if not live_settings.DANGEROUS_WEBHOOKS_ENABLED:
diff --git a/engine/apps/api/serializers/alert_group.py b/engine/apps/api/serializers/alert_group.py
index f9ecf443..a71cfde2 100644
--- a/engine/apps/api/serializers/alert_group.py
+++ b/engine/apps/api/serializers/alert_group.py
@@ -132,7 +132,7 @@ class AlertGroupSerializer(AlertGroupListSerializer):
fields = AlertGroupListSerializer.Meta.fields + [
"alerts",
"render_after_resolve_report_json",
- "permalink",
+ "slack_permalink",
"last_alert_at",
]
diff --git a/engine/apps/api/tests/test_custom_button.py b/engine/apps/api/tests/test_custom_button.py
index 3c358c90..bc91fc60 100644
--- a/engine/apps/api/tests/test_custom_button.py
+++ b/engine/apps/api/tests/test_custom_button.py
@@ -377,3 +377,44 @@ def test_custom_button_action_permissions(
response = client.post(url, format="json", **make_user_auth_headers(user, token))
assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_get_custom_button_from_other_team_with_flag(
+ make_organization_and_user_with_plugin_token,
+ make_team,
+ make_user_auth_headers,
+ make_custom_action,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+
+ team = make_team(organization)
+
+ custom_button = make_custom_action(organization=organization, team=team)
+ client = APIClient()
+
+ url = reverse("api-internal:custom_button-detail", kwargs={"pk": custom_button.public_primary_key})
+ url = f"{url}?from_organization=true"
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_custom_button_from_other_team_without_flag(
+ make_organization_and_user_with_plugin_token,
+ make_team,
+ make_user_auth_headers,
+ make_custom_action,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+
+ team = make_team(organization)
+
+ custom_button = make_custom_action(organization=organization, team=team)
+ client = APIClient()
+
+ url = reverse("api-internal:custom_button-detail", kwargs={"pk": custom_button.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
diff --git a/engine/apps/api/tests/test_schedules.py b/engine/apps/api/tests/test_schedules.py
index ea3a6dc0..13ef0825 100644
--- a/engine/apps/api/tests/test_schedules.py
+++ b/engine/apps/api/tests/test_schedules.py
@@ -10,6 +10,7 @@ from rest_framework.serializers import ValidationError
from rest_framework.test import APIClient
from apps.alerts.models import EscalationPolicy
+from apps.schedules.ical_utils import memoized_users_in_ical
from apps.schedules.models import (
CustomOnCallShift,
OnCallSchedule,
@@ -742,6 +743,8 @@ def test_filter_events_final_schedule(
request_date = start_date
user_a, user_b, user_c, user_d, user_e = (make_user_for_organization(organization, username=i) for i in "ABCDE")
+ # clear the user pks <-> organization cache (it persists between tests)
+ memoized_users_in_ical.cache_clear()
shifts = (
# user, priority, start time (h), duration (hs)
@@ -837,7 +840,7 @@ def test_next_shifts_per_user(
make_schedule,
make_on_call_shift,
):
- organization, user, token = make_organization_and_user_with_plugin_token()
+ organization, admin, token = make_organization_and_user_with_plugin_token()
client = APIClient()
schedule = make_schedule(
@@ -848,6 +851,8 @@ def test_next_shifts_per_user(
tomorrow = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) + timezone.timedelta(days=1)
user_a, user_b, user_c, user_d = (make_user_for_organization(organization, username=i) for i in "ABCD")
+ # clear the user pks <-> organization cache (it persists between tests)
+ memoized_users_in_ical.cache_clear()
shifts = (
# user, priority, start time (h), duration (hs)
@@ -860,16 +865,16 @@ def test_next_shifts_per_user(
for user, priority, start_h, duration in shifts:
data = {
"start": tomorrow + timezone.timedelta(hours=start_h),
- "rotation_start": tomorrow,
+ "rotation_start": tomorrow + timezone.timedelta(hours=start_h),
"duration": timezone.timedelta(hours=duration),
"priority_level": priority,
"frequency": CustomOnCallShift.FREQUENCY_DAILY,
"schedule": schedule,
}
on_call_shift = make_on_call_shift(
- organization=organization, shift_type=CustomOnCallShift.TYPE_RECURRENT_EVENT, **data
+ organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
)
- on_call_shift.users.add(user)
+ on_call_shift.add_rolling_users([[user]])
# override in the past: 17-18 / D
# won't be listed, but user D will still be included in the response
@@ -896,10 +901,10 @@ def test_next_shifts_per_user(
)
override.add_rolling_users([[user_c]])
- # final sdhedule: 7-12: B, 15-16: A, 16-17: B, 17-18: C (override), 18-20: C
+ # final schedule: 7-12: B, 15-16: A, 16-17: B, 17-18: C (override), 18-20: C
url = reverse("api-internal:schedule-next-shifts-per-user", kwargs={"pk": schedule.public_primary_key})
- response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ response = client.get(url, format="json", **make_user_auth_headers(admin, token))
assert response.status_code == status.HTTP_200_OK
expected = {
@@ -980,6 +985,8 @@ def test_merging_same_shift_events(
user_a = make_user_for_organization(organization)
user_b = make_user_for_organization(organization)
user_c = make_user_for_organization(organization, role=Role.VIEWER)
+ # clear the user pks <-> organization cache (it persists between tests)
+ memoized_users_in_ical.cache_clear()
data = {
"start": start_date + timezone.timedelta(hours=10),
@@ -1401,3 +1408,54 @@ def test_schedule_mention_options_permissions(
response = client.get(url, format="json", **make_user_auth_headers(user, token))
assert response.status_code == expected_status
+
+
+@pytest.mark.django_db
+def test_get_schedule_from_other_team_with_flag(
+ make_organization_and_user_with_plugin_token,
+ make_team,
+ make_user_auth_headers,
+ make_schedule,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+
+ team = make_team(organization)
+
+ calendar_schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleCalendar,
+ name="test_calendar_schedule",
+ team=team,
+ )
+
+ client = APIClient()
+ url = reverse("api-internal:schedule-detail", kwargs={"pk": calendar_schedule.public_primary_key})
+ url = f"{url}?from_organization=true"
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+
+
+@pytest.mark.django_db
+def test_get_schedule_from_other_team_without_flag(
+ make_organization_and_user_with_plugin_token,
+ make_team,
+ make_user_auth_headers,
+ make_schedule,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+
+ team = make_team(organization)
+
+ calendar_schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleCalendar,
+ name="test_calendar_schedule",
+ team=team,
+ )
+
+ client = APIClient()
+ url = reverse("api-internal:schedule-detail", kwargs={"pk": calendar_schedule.public_primary_key})
+
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_403_FORBIDDEN
diff --git a/engine/apps/api/tests/test_team.py b/engine/apps/api/tests/test_team.py
index aae7360f..40df30d8 100644
--- a/engine/apps/api/tests/test_team.py
+++ b/engine/apps/api/tests/test_team.py
@@ -3,6 +3,7 @@ from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
+from apps.schedules.models import OnCallScheduleCalendar
from apps.user_management.models import Team
from common.constants.role import Role
@@ -105,28 +106,31 @@ def test_team_permissions_wrong_team_general(
user = make_user(organization=organization)
_, token = make_token_for_organization(organization)
+ client = APIClient()
+
team = make_team(organization)
user.teams.add(team)
user.current_team = team
user.save(update_fields=["current_team"])
+ user_from_general_team = make_user(organization=organization)
+
alert_receive_channel = make_alert_receive_channel(organization)
alert_group = make_alert_group(alert_receive_channel)
- # escalation_chain = make_escalation_chain(organization)
- # schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
- # webhook = make_custom_action(organization)
+ escalation_chain = make_escalation_chain(organization)
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ webhook = make_custom_action(organization)
for endpoint, instance in (
("alertgroup", alert_group),
- # todo: implement team filtering for other resources
- # ("alert_receive_channel", alert_receive_channel),
- # ("escalation_chain", escalation_chain),
- # ("schedule", schedule),
- # ("custom_button", webhook),
+ ("alert_receive_channel", alert_receive_channel),
+ ("escalation_chain", escalation_chain),
+ ("schedule", schedule),
+ ("custom_button", webhook),
+ ("user", user_from_general_team),
):
- client = APIClient()
url = reverse(f"api-internal:{endpoint}-detail", kwargs={"pk": instance.public_primary_key})
response = client.get(url, **make_user_auth_headers(user, token))
@@ -156,25 +160,30 @@ def test_team_permissions_wrong_team(
user = make_user(organization=organization)
_, token = make_token_for_organization(organization)
+ client = APIClient()
+
team = make_team(organization)
user.teams.add(team)
+ another_user = make_user(organization=organization)
+ another_user.teams.add(team)
+ another_user.current_team = team
+ another_user.save(update_fields=["current_team"])
+
alert_receive_channel = make_alert_receive_channel(organization, team=team)
alert_group = make_alert_group(alert_receive_channel)
- # escalation_chain = make_escalation_chain(organization, team=team)
- # schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar, team=team)
- # webhook = make_custom_action(organization, team=team)
+ escalation_chain = make_escalation_chain(organization, team=team)
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar, team=team)
+ webhook = make_custom_action(organization, team=team)
for endpoint, instance in (
("alertgroup", alert_group),
- # todo: implement team filtering for other resources
- # ("alert_receive_channel", alert_receive_channel),
- # ("escalation_chain", escalation_chain),
- # ("schedule", schedule),
- # ("custom_button", webhook),
+ ("alert_receive_channel", alert_receive_channel),
+ ("escalation_chain", escalation_chain),
+ ("schedule", schedule),
+ ("custom_button", webhook),
):
- client = APIClient()
url = reverse(f"api-internal:{endpoint}-detail", kwargs={"pk": instance.public_primary_key})
response = client.get(url, **make_user_auth_headers(user, token))
@@ -190,6 +199,12 @@ def test_team_permissions_wrong_team(
},
}
+ # Every user belongs to the General team
+ url = reverse(f"api-internal:user-detail", kwargs={"pk": another_user.public_primary_key})
+ response = client.get(url, **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
@pytest.mark.django_db
def test_team_permissions_not_in_team(
@@ -209,24 +224,29 @@ def test_team_permissions_not_in_team(
user = make_user(organization=organization)
_, token = make_token_for_organization(organization)
+ client = APIClient()
+
team = make_team(organization)
+ another_user = make_user(organization=organization)
+ another_user.teams.add(team)
+ another_user.current_team = team
+ another_user.save(update_fields=["current_team"])
+
alert_receive_channel = make_alert_receive_channel(organization, team=team)
alert_group = make_alert_group(alert_receive_channel)
- # escalation_chain = make_escalation_chain(organization, team=team)
- # schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar, team=team)
- # webhook = make_custom_action(organization, team=team)
+ escalation_chain = make_escalation_chain(organization, team=team)
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar, team=team)
+ webhook = make_custom_action(organization, team=team)
for endpoint, instance in (
("alertgroup", alert_group),
- # todo: implement team filtering for other resources
- # ("alert_receive_channel", alert_receive_channel),
- # ("escalation_chain", escalation_chain),
- # ("schedule", schedule),
- # ("custom_button", webhook),
+ ("alert_receive_channel", alert_receive_channel),
+ ("escalation_chain", escalation_chain),
+ ("schedule", schedule),
+ ("custom_button", webhook),
):
- client = APIClient()
url = reverse(f"api-internal:{endpoint}-detail", kwargs={"pk": instance.public_primary_key})
response = client.get(url, **make_user_auth_headers(user, token))
@@ -234,6 +254,12 @@ def test_team_permissions_not_in_team(
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.json() == {"error_code": "wrong_team"}
+ # Every user belongs to the General team
+ url = reverse(f"api-internal:user-detail", kwargs={"pk": another_user.public_primary_key})
+ response = client.get(url, **make_user_auth_headers(user, token))
+
+ assert response.status_code == status.HTTP_200_OK
+
@pytest.mark.django_db
def test_team_permissions_right_team(
@@ -253,28 +279,32 @@ def test_team_permissions_right_team(
user = make_user(organization=organization)
_, token = make_token_for_organization(organization)
+ client = APIClient()
+
team = make_team(organization)
user.teams.add(team)
user.current_team = team
user.save(update_fields=["current_team"])
+ another_user = make_user(organization=organization)
+ another_user.teams.add(team)
+
alert_receive_channel = make_alert_receive_channel(organization, team=team)
alert_group = make_alert_group(alert_receive_channel)
- # escalation_chain = make_escalation_chain(organization, team=team)
- # schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar, team=team)
- # webhook = make_custom_action(organization, team=team)
+ escalation_chain = make_escalation_chain(organization, team=team)
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar, team=team)
+ webhook = make_custom_action(organization, team=team)
for endpoint, instance in (
("alertgroup", alert_group),
- # todo: implement team filtering for other resources
- # ("alert_receive_channel", alert_receive_channel),
- # ("escalation_chain", escalation_chain),
- # ("schedule", schedule),
- # ("custom_button", webhook),
+ ("alert_receive_channel", alert_receive_channel),
+ ("escalation_chain", escalation_chain),
+ ("schedule", schedule),
+ ("custom_button", webhook),
+ ("user", another_user),
):
- client = APIClient()
url = reverse(f"api-internal:{endpoint}-detail", kwargs={"pk": instance.public_primary_key})
response = client.get(url, **make_user_auth_headers(user, token))
diff --git a/engine/apps/api/views/alert_receive_channel.py b/engine/apps/api/views/alert_receive_channel.py
index 6dce1c56..866620a8 100644
--- a/engine/apps/api/views/alert_receive_channel.py
+++ b/engine/apps/api/views/alert_receive_channel.py
@@ -22,6 +22,7 @@ from common.api_helpers.mixins import (
FilterSerializerMixin,
PreviewTemplateMixin,
PublicPrimaryKeyMixin,
+ TeamFilteringMixin,
UpdateSerializerMixin,
)
from common.exceptions import TeamCanNotBeChangedError, UnableToSendDemoAlert
@@ -58,6 +59,7 @@ class AlertReceiveChannelFilter(filters.FilterSet):
class AlertReceiveChannelView(
PreviewTemplateMixin,
+ TeamFilteringMixin,
PublicPrimaryKeyMixin,
FilterSerializerMixin,
UpdateSerializerMixin,
diff --git a/engine/apps/api/views/custom_button.py b/engine/apps/api/views/custom_button.py
index 0a9f1973..8d2a8082 100644
--- a/engine/apps/api/views/custom_button.py
+++ b/engine/apps/api/views/custom_button.py
@@ -12,11 +12,11 @@ from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission,
from apps.api.serializers.custom_button import CustomButtonSerializer
from apps.auth_token.auth import PluginAuthentication
from common.api_helpers.exceptions import BadRequest
-from common.api_helpers.mixins import PublicPrimaryKeyMixin
+from common.api_helpers.mixins import PublicPrimaryKeyMixin, TeamFilteringMixin
from common.insight_log import EntityEvent, write_resource_insight_log
-class CustomButtonView(PublicPrimaryKeyMixin, ModelViewSet):
+class CustomButtonView(TeamFilteringMixin, PublicPrimaryKeyMixin, ModelViewSet):
authentication_classes = (PluginAuthentication,)
permission_classes = (IsAuthenticated, ActionPermission)
action_permissions = {
@@ -36,7 +36,15 @@ class CustomButtonView(PublicPrimaryKeyMixin, ModelViewSet):
return queryset
def get_object(self):
- # Override this method because we want to get object from organization instead of concrete team.
+ # get the object from the whole organization if there is a flag `from_organization=true`
+ # otherwise get the object from the current team
+ get_from_organization = self.request.query_params.get("from_organization", "false") == "true"
+ if get_from_organization:
+ return self.get_object_from_organization()
+ return super().get_object()
+
+ def get_object_from_organization(self):
+ # use this method to get the object from the whole organization instead of the current team
pk = self.kwargs["pk"]
organization = self.request.auth.organization
@@ -50,9 +58,6 @@ class CustomButtonView(PublicPrimaryKeyMixin, ModelViewSet):
return obj
- def original_get_object(self):
- return super().get_object()
-
def perform_create(self, serializer):
serializer.save()
write_resource_insight_log(
@@ -85,7 +90,7 @@ class CustomButtonView(PublicPrimaryKeyMixin, ModelViewSet):
def action(self, request, pk):
alert_group_id = request.query_params.get("alert_group", None)
if alert_group_id is not None:
- custom_button = self.original_get_object()
+ custom_button = self.get_object()
try:
alert_group = AlertGroup.unarchived_objects.get(
public_primary_key=alert_group_id, channel=custom_button.alert_receive_channel
diff --git a/engine/apps/api/views/escalation_chain.py b/engine/apps/api/views/escalation_chain.py
index f972a992..72c73d3a 100644
--- a/engine/apps/api/views/escalation_chain.py
+++ b/engine/apps/api/views/escalation_chain.py
@@ -11,11 +11,11 @@ from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission,
from apps.api.serializers.escalation_chain import EscalationChainListSerializer, EscalationChainSerializer
from apps.auth_token.auth import PluginAuthentication
from common.api_helpers.exceptions import BadRequest
-from common.api_helpers.mixins import ListSerializerMixin, PublicPrimaryKeyMixin
+from common.api_helpers.mixins import ListSerializerMixin, PublicPrimaryKeyMixin, TeamFilteringMixin
from common.insight_log import EntityEvent, write_resource_insight_log
-class EscalationChainViewSet(PublicPrimaryKeyMixin, ListSerializerMixin, viewsets.ModelViewSet):
+class EscalationChainViewSet(TeamFilteringMixin, PublicPrimaryKeyMixin, ListSerializerMixin, viewsets.ModelViewSet):
authentication_classes = (PluginAuthentication,)
permission_classes = (IsAuthenticated, ActionPermission)
diff --git a/engine/apps/api/views/features.py b/engine/apps/api/views/features.py
index fc56ca93..cc69514f 100644
--- a/engine/apps/api/views/features.py
+++ b/engine/apps/api/views/features.py
@@ -27,6 +27,7 @@ class FeaturesAPIView(APIView):
return Response(self._get_enabled_features(request))
def _get_enabled_features(self, request):
+ DynamicSetting = apps.get_model("base", "DynamicSetting")
enabled_features = []
if settings.FEATURE_SLACK_INTEGRATION_ENABLED:
@@ -36,7 +37,6 @@ class FeaturesAPIView(APIView):
enabled_features.append(FEATURE_TELEGRAM)
if settings.MOBILE_APP_PUSH_NOTIFICATIONS_ENABLED:
- DynamicSetting = apps.get_model("base", "DynamicSetting")
mobile_app_settings = DynamicSetting.objects.get_or_create(
name="mobile_app_settings",
defaults={
@@ -59,5 +59,17 @@ class FeaturesAPIView(APIView):
if settings.FEATURE_WEB_SCHEDULES_ENABLED:
enabled_features.append(FEATURE_WEB_SCHEDULES)
+ else:
+ # allow enabling web schedules per org, independently of global status flag
+ enabled_web_schedules_orgs = DynamicSetting.objects.get_or_create(
+ name="enabled_web_schedules_orgs",
+ defaults={
+ "json_value": {
+ "org_ids": [],
+ }
+ },
+ )[0]
+ if request.auth.organization.pk in enabled_web_schedules_orgs.json_value["org_ids"]:
+ enabled_features.append(FEATURE_WEB_SCHEDULES)
return enabled_features
diff --git a/engine/apps/api/views/schedule.py b/engine/apps/api/views/schedule.py
index ece5186c..4bc5764e 100644
--- a/engine/apps/api/views/schedule.py
+++ b/engine/apps/api/views/schedule.py
@@ -32,6 +32,7 @@ from common.api_helpers.mixins import (
CreateSerializerMixin,
PublicPrimaryKeyMixin,
ShortSerializerMixin,
+ TeamFilteringMixin,
UpdateSerializerMixin,
)
from common.api_helpers.utils import create_engine_url, get_date_range_from_request
@@ -43,7 +44,12 @@ EVENTS_FILTER_BY_FINAL = "final"
class ScheduleView(
- PublicPrimaryKeyMixin, ShortSerializerMixin, CreateSerializerMixin, UpdateSerializerMixin, ModelViewSet
+ TeamFilteringMixin,
+ PublicPrimaryKeyMixin,
+ ShortSerializerMixin,
+ CreateSerializerMixin,
+ UpdateSerializerMixin,
+ ModelViewSet,
):
authentication_classes = (PluginAuthentication,)
permission_classes = (IsAuthenticated, ActionPermission)
@@ -124,28 +130,6 @@ class ScheduleView(
queryset = self.serializer_class.setup_eager_loading(queryset)
return queryset
- def get_object(self):
- # Override this method because we want to get object from organization instead of concrete team.
- pk = self.kwargs["pk"]
- organization = self.request.auth.organization
- queryset = organization.oncall_schedules.filter(
- public_primary_key=pk,
- )
- queryset = self._annotate_queryset(queryset)
-
- try:
- obj = queryset.get()
- except ObjectDoesNotExist:
- raise NotFound
-
- # May raise a permission denied
- self.check_object_permissions(self.request, obj)
-
- return obj
-
- def original_get_object(self):
- return super().get_object()
-
def perform_create(self, serializer):
serializer.save()
write_resource_insight_log(instance=serializer.instance, author=self.request.user, event=EntityEvent.CREATED)
@@ -178,6 +162,33 @@ class ScheduleView(
if instance.user_group is not None:
update_slack_user_group_for_schedules.apply_async((instance.user_group.pk,))
+ def get_object(self):
+ # get the object from the whole organization if there is a flag `from_organization=true`
+ # otherwise get the object from the current team
+ get_from_organization = self.request.query_params.get("from_organization", "false") == "true"
+ if get_from_organization:
+ return self.get_object_from_organization()
+ return super().get_object()
+
+ def get_object_from_organization(self):
+ # use this method to get the object from the whole organization instead of the current team
+ pk = self.kwargs["pk"]
+ organization = self.request.auth.organization
+ queryset = organization.oncall_schedules.filter(
+ public_primary_key=pk,
+ )
+ queryset = self._annotate_queryset(queryset)
+
+ try:
+ obj = queryset.get()
+ except ObjectDoesNotExist:
+ raise NotFound
+
+ # May raise a permission denied
+ self.check_object_permissions(self.request, obj)
+
+ return obj
+
def get_request_timezone(self):
user_tz = self.request.query_params.get("user_tz", "UTC")
try:
@@ -203,7 +214,7 @@ class ScheduleView(
with_empty = self.request.query_params.get("with_empty", False) == "true"
with_gap = self.request.query_params.get("with_gap", False) == "true"
- schedule = self.original_get_object()
+ schedule = self.get_object()
events = schedule.filter_events(user_tz, date, days=1, with_empty=with_empty, with_gap=with_gap)
slack_channel = (
@@ -235,7 +246,7 @@ class ScheduleView(
raise BadRequest(detail="Invalid type value")
resolve_schedule = filter_by is None or filter_by == EVENTS_FILTER_BY_FINAL
- schedule = self.original_get_object()
+ schedule = self.get_object()
if filter_by is not None and filter_by != EVENTS_FILTER_BY_FINAL:
filter_by = OnCallSchedule.PRIMARY if filter_by == EVENTS_FILTER_BY_ROTATION else OnCallSchedule.OVERRIDES
@@ -259,7 +270,7 @@ class ScheduleView(
user_tz, _ = self.get_request_timezone()
now = timezone.now()
starting_date = now.date()
- schedule = self.original_get_object()
+ schedule = self.get_object()
events = schedule.final_events(user_tz, starting_date, days=30)
users = {u: None for u in schedule.related_users()}
@@ -274,7 +285,7 @@ class ScheduleView(
@action(detail=True, methods=["get"])
def related_escalation_chains(self, request, pk):
"""Return escalation chains associated to schedule."""
- schedule = self.original_get_object()
+ schedule = self.get_object()
escalation_chains = EscalationChain.objects.filter(escalation_policies__notify_schedule=schedule).distinct()
result = [{"name": e.name, "pk": e.public_primary_key} for e in escalation_chains]
@@ -290,7 +301,7 @@ class ScheduleView(
@action(detail=True, methods=["post"])
def reload_ical(self, request, pk):
- schedule = self.original_get_object()
+ schedule = self.get_object()
schedule.drop_cached_ical()
schedule.check_empty_shifts_for_next_week()
schedule.check_gaps_for_next_week()
@@ -302,7 +313,7 @@ class ScheduleView(
@action(detail=True, methods=["get", "post", "delete"])
def export_token(self, request, pk):
- schedule = self.original_get_object()
+ schedule = self.get_object()
if self.request.method == "GET":
try:
diff --git a/engine/apps/api/views/user.py b/engine/apps/api/views/user.py
index 219501f7..cf37b9e7 100644
--- a/engine/apps/api/views/user.py
+++ b/engine/apps/api/views/user.py
@@ -23,6 +23,7 @@ from apps.api.permissions import (
IsAdminOrEditor,
IsOwnerOrAdmin,
)
+from apps.api.serializers.team import TeamSerializer
from apps.api.serializers.user import FilterUserSerializer, UserHiddenFieldsSerializer, UserSerializer
from apps.auth_token.auth import (
MobileAppAuthTokenAuthentication,
@@ -39,7 +40,7 @@ from apps.telegram.client import TelegramClient
from apps.telegram.models import TelegramVerificationCode
from apps.twilioapp.phone_manager import PhoneManager
from apps.twilioapp.twilio_client import twilio_client
-from apps.user_management.models import User
+from apps.user_management.models import Team, User
from common.api_helpers.exceptions import Conflict
from common.api_helpers.mixins import FilterSerializerMixin, PublicPrimaryKeyMixin
from common.api_helpers.paginators import HundredPageSizePaginator
@@ -228,7 +229,10 @@ class UserView(
def retrieve(self, request, *args, **kwargs):
context = {"request": self.request, "format": self.format_kwarg, "view": self}
- instance = self.get_object()
+ try:
+ instance = self.get_object()
+ except NotFound:
+ return self.wrong_team_response()
if settings.OSS_INSTALLATION and live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED:
from apps.oss_installation.models import CloudConnector, CloudUserIdentity
@@ -243,6 +247,28 @@ class UserView(
serializer = self.get_serializer(instance, context=context)
return Response(serializer.data)
+ def wrong_team_response(self):
+ """
+ This method returns 403 and {"error_code": "wrong_team", "owner_team": {"name", "id", "email", "avatar_url"}}.
+ Used when the requested instance doesn't belong to the user's current_team.
+ Used instead of TeamFilteringMixin because of the m2m teams field (the mixin doesn't work correctly with it)
+ and the overridden retrieve method in UserView.
+ """
+ queryset = User.objects.filter(organization=self.request.user.organization).order_by("id")
+ queryset = self.filter_queryset(queryset)
+
+ try:
+ queryset.get(public_primary_key=self.kwargs["pk"])
+ except ObjectDoesNotExist:
+ raise NotFound
+
+ general_team = Team(public_primary_key=None, name="General", email=None, avatar_url=None)
+
+ return Response(
+ data={"error_code": "wrong_team", "owner_team": TeamSerializer(general_team).data},
+ status=status.HTTP_403_FORBIDDEN,
+ )
+
def current(self, request):
serializer = UserSerializer(self.get_queryset().get(pk=self.request.user.pk))
return Response(serializer.data)
diff --git a/engine/apps/integrations/tests/test_ratelimit.py b/engine/apps/integrations/tests/test_ratelimit.py
index 75e3d903..97b56937 100644
--- a/engine/apps/integrations/tests/test_ratelimit.py
+++ b/engine/apps/integrations/tests/test_ratelimit.py
@@ -8,12 +8,9 @@ from django.urls import reverse
from apps.alerts.models import AlertReceiveChannel
-# Ratelimit keys are stored in cache. Clean it before and after every test to make them idempotent.
-def setup_module(module):
- cache.clear()
-
-
-def teardown_module(module):
+@pytest.fixture(autouse=True)
+def clear_cache():
+ # Ratelimit keys are stored in cache. Clear it before every test to keep tests idempotent.
cache.clear()
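The autouse fixture above clears the cache before each test; if symmetric cleanup is ever needed again, the same fixture can wrap the test body with a yield. A sketch of that pattern (not part of this diff):

    import pytest
    from django.core.cache import cache

    @pytest.fixture(autouse=True)
    def clear_cache():
        cache.clear()  # before the test
        yield          # the test runs here
        cache.clear()  # after the test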
diff --git a/engine/apps/public_api/serializers/incidents.py b/engine/apps/public_api/serializers/incidents.py
index 5010f88f..00ec64f6 100644
--- a/engine/apps/public_api/serializers/incidents.py
+++ b/engine/apps/public_api/serializers/incidents.py
@@ -1,6 +1,8 @@
+from django.db.models import Prefetch
from rest_framework import serializers
from apps.alerts.models import AlertGroup
+from apps.telegram.models.message import TelegramMessage
from common.api_helpers.mixins import EagerLoadingMixin
@@ -15,7 +17,14 @@ class IncidentSerializer(EagerLoadingMixin, serializers.ModelSerializer):
state = serializers.SerializerMethodField()
SELECT_RELATED = ["channel", "channel_filter", "slack_message"]
- PREFETCH_RELATED = ["alerts"]
+ PREFETCH_RELATED = [
+ "alerts",
+ Prefetch(
+ "telegram_messages",
+ TelegramMessage.objects.filter(chat_id__startswith="-", message_type=TelegramMessage.ALERT_GROUP_MESSAGE),
+ to_attr="prefetched_telegram_messages",
+ ),
+ ]
class Meta:
model = AlertGroup
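A rough illustration of how the prefetched attribute is typically consumed when building the telegram permalink; the exact attribute holding the message link is an assumption here, not something shown in this diff.

    # Sketch only: serializer-side use of the Prefetch(..., to_attr=...) result.
    def get_telegram_permalink(alert_group):
        messages = getattr(alert_group, "prefetched_telegram_messages", [])
        # assumed: group-chat alert messages expose a permalink-like attribute
        return messages[0].link if messages else None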
diff --git a/engine/apps/public_api/tests/test_incidents.py b/engine/apps/public_api/tests/test_incidents.py
index 360a5e5e..45a5d115 100644
--- a/engine/apps/public_api/tests/test_incidents.py
+++ b/engine/apps/public_api/tests/test_incidents.py
@@ -1,4 +1,5 @@
from unittest import mock
+from unittest.mock import patch
import pytest
from django.urls import reverse
@@ -40,6 +41,7 @@ def construct_expected_response_from_incidents(incidents):
"title": None,
"permalinks": {
"slack": None,
+ "telegram": None,
},
}
)
@@ -186,6 +188,24 @@ def test_delete_incident_invalid_request(incident_public_api_setup):
assert response.status_code == status.HTTP_400_BAD_REQUEST
+@pytest.mark.django_db
+def test_pagination(settings, incident_public_api_setup):
+ settings.BASE_URL = "https://test.com/test/prefixed/urls"
+
+ token, incidents, _, _ = incident_public_api_setup
+ client = APIClient()
+
+ url = reverse("api-public:alert_groups-list")
+
+ with patch("common.api_helpers.paginators.PathPrefixedPagination.get_page_size", return_value=1):
+ response = client.get(url, HTTP_AUTHORIZATION=f"{token}")
+
+ assert response.status_code == status.HTTP_200_OK
+ result = response.json()
+
+ assert result["next"].startswith("https://test.com/test/prefixed/urls")
+
+
# This is test from old django-based tests
# TODO: uncomment with date checking in delete mode
# def test_delete_incident_invalid_date(self):
diff --git a/engine/apps/schedules/models/custom_on_call_shift.py b/engine/apps/schedules/models/custom_on_call_shift.py
index 232c824a..db9ca5d3 100644
--- a/engine/apps/schedules/models/custom_on_call_shift.py
+++ b/engine/apps/schedules/models/custom_on_call_shift.py
@@ -5,6 +5,7 @@ from calendar import monthrange
from uuid import uuid4
import pytz
+from dateutil import relativedelta
from django.apps import apps
from django.conf import settings
from django.core.validators import MinLengthValidator
@@ -353,6 +354,13 @@ class CustomOnCallShift(models.Model):
ONE_DAY = 1
ONE_HOUR = 1
+ def add_months(year, month, months_add):
+ """
+ Utility method for month calculation. E.g. (2022, 12) + 1 month = (2023, 1)
+ """
+ dt = timezone.datetime.min.replace(year=year, month=month) + relativedelta.relativedelta(months=months_add)
+ return dt.year, dt.month
+
current_event = Event.from_ical(event_ical)
# take shift interval, not event interval. For rolling_users shift it is not the same.
interval = self.interval or 1
@@ -385,7 +393,8 @@ class CustomOnCallShift(models.Model):
days_for_next_event = DAYS_IN_A_MONTH - current_event_start.day + ONE_DAY
# count next event start date with respect to event interval
for i in range(1, interval):
- next_month_days = monthrange(current_event_start.year, current_event_start.month + i)[1]
+ year, month = add_months(current_event_start.year, current_event_start.month, i)
+ next_month_days = monthrange(year, month)[1]
days_for_next_event += next_month_days
next_event_start = current_event_start + timezone.timedelta(days=days_for_next_event)
@@ -411,8 +420,7 @@ class CustomOnCallShift(models.Model):
repetitions = UnfoldableCalendar(current_event).RepeatedEvent(
current_event, next_event_start.replace(microsecond=0)
)
- ical_iter = repetitions.__iter__()
- for event in ical_iter:
+ for event in repetitions.__iter__():
if end_date: # end_date exists for long events with frequency weekly and monthly
if end_date >= event.start >= next_event_start:
if (
@@ -451,8 +459,7 @@ class CustomOnCallShift(models.Model):
repetitions = UnfoldableCalendar(initial_event).RepeatedEvent(
initial_event, initial_event_start.replace(microsecond=0)
)
- ical_iter = repetitions.__iter__()
- for event in ical_iter:
+ for event in repetitions.__iter__():
if event.start > date:
break
last_event = event
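The add_months helper keeps monthrange() from being called with an out-of-range month such as 13 when an interval crosses a year boundary. The same arithmetic can be checked standalone with plain datetime (django.utils.timezone re-exports it):

    from calendar import monthrange
    from datetime import datetime
    from dateutil import relativedelta

    def add_months(year, month, months_add):
        dt = datetime.min.replace(year=year, month=month) + relativedelta.relativedelta(months=months_add)
        return dt.year, dt.month

    assert add_months(2022, 12, 1) == (2023, 1)
    assert monthrange(*add_months(2022, 12, 2))[1] == 28  # February 2023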
diff --git a/engine/apps/schedules/models/on_call_schedule.py b/engine/apps/schedules/models/on_call_schedule.py
index b3d72911..24aa8d0d 100644
--- a/engine/apps/schedules/models/on_call_schedule.py
+++ b/engine/apps/schedules/models/on_call_schedule.py
@@ -654,6 +654,7 @@ class OnCallScheduleWeb(OnCallSchedule):
for g in rolling_groups
if g is not None
),
+ set(),
)
return users
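The added set() is the initial value for the reduction over rolling-user groups, so an empty web schedule yields an empty user set instead of raising. Assuming the surrounding expression is a functools.reduce over set unions, the failure mode looks like this:

    from functools import reduce

    groups = []  # an empty schedule has no rolling user groups
    # reduce(set.union, (set(g) for g in groups))            # TypeError: empty iterable, no initial value
    users = reduce(set.union, (set(g) for g in groups), set())
    assert users == set()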
diff --git a/engine/apps/schedules/tests/test_custom_on_call_shift.py b/engine/apps/schedules/tests/test_custom_on_call_shift.py
index dd3cfba6..4dbf6029 100644
--- a/engine/apps/schedules/tests/test_custom_on_call_shift.py
+++ b/engine/apps/schedules/tests/test_custom_on_call_shift.py
@@ -354,11 +354,11 @@ def test_rolling_users_event_with_interval_monthly(
user_2 = make_user_for_organization(organization)
schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
- start_date = timezone.now().replace(day=1, microsecond=0)
- days_for_next_month_1 = monthrange(start_date.year, start_date.month)[1]
- days_for_next_month_2 = monthrange(start_date.year, start_date.month + 1)[1] + days_for_next_month_1
- days_for_next_month_3 = monthrange(start_date.year, start_date.month + 2)[1] + days_for_next_month_2
- days_for_next_month_4 = monthrange(start_date.year, start_date.month + 3)[1] + days_for_next_month_3
+ start_date = timezone.datetime(year=2022, month=10, day=1, hour=10, minute=30)
+ days_for_next_month_1 = monthrange(2022, 10)[1]
+ days_for_next_month_2 = monthrange(2022, 11)[1] + days_for_next_month_1
+ days_for_next_month_3 = monthrange(2022, 12)[1] + days_for_next_month_2
+ days_for_next_month_4 = monthrange(2023, 1)[1] + days_for_next_month_3
data = {
"priority_level": 1,
@@ -718,19 +718,19 @@ def test_rolling_users_with_diff_start_and_rotation_start_monthly(
user_3 = make_user_for_organization(organization)
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
- now = timezone.now().replace(day=1, microsecond=0)
- days_in_curr_month = monthrange(now.year, now.month)[1]
- days_in_next_month = monthrange(now.year, now.month + 1)[1]
+ start_date = timezone.datetime(year=2022, month=12, day=1, hour=10, minute=30)
+ days_in_curr_month = monthrange(2022, 12)[1]
+ days_in_next_month = monthrange(2023, 1)[1]
data = {
"priority_level": 1,
- "start": now,
- "week_start": now.weekday(),
- "rotation_start": now + timezone.timedelta(days=days_in_curr_month - 1, hours=1),
+ "start": start_date,
+ "week_start": start_date.weekday(),
+ "rotation_start": start_date + timezone.timedelta(days=days_in_curr_month - 1, hours=1),
"duration": timezone.timedelta(seconds=1800),
"frequency": CustomOnCallShift.FREQUENCY_MONTHLY,
"schedule": schedule,
- "until": now + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 10, minutes=1),
+ "until": start_date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 10, minutes=1),
}
rolling_users = [[user_1], [user_2], [user_3]]
on_call_shift = make_on_call_shift(
@@ -738,7 +738,7 @@ def test_rolling_users_with_diff_start_and_rotation_start_monthly(
)
on_call_shift.add_rolling_users(rolling_users)
- date = now + timezone.timedelta(minutes=5)
+ date = start_date + timezone.timedelta(minutes=5)
# rotation starts from user_2, because user_1 started earlier than rotation start date
user_2_on_call_dates = [date + timezone.timedelta(days=days_in_curr_month)]
user_3_on_call_dates = [date + timezone.timedelta(days=days_in_curr_month + days_in_next_month)]
@@ -774,9 +774,9 @@ def test_rolling_users_with_diff_start_and_rotation_start_monthly_by_monthday(
user_3 = make_user_for_organization(organization)
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
- start_date = timezone.now().replace(day=1, microsecond=0)
- days_in_curr_month = monthrange(start_date.year, start_date.month)[1]
- days_in_next_month = monthrange(start_date.year, start_date.month + 1)[1]
+ start_date = timezone.datetime(year=2022, month=12, day=1, hour=10, minute=30)
+ days_in_curr_month = monthrange(2022, 12)[1]
+ days_in_next_month = monthrange(2023, 1)[1]
data = {
"priority_level": 1,
diff --git a/engine/apps/schedules/tests/test_on_call_schedule.py b/engine/apps/schedules/tests/test_on_call_schedule.py
index e8da0e67..8bb079f4 100644
--- a/engine/apps/schedules/tests/test_on_call_schedule.py
+++ b/engine/apps/schedules/tests/test_on_call_schedule.py
@@ -4,6 +4,7 @@ import pytest
import pytz
from django.utils import timezone
+from apps.schedules.ical_utils import memoized_users_in_ical
from apps.schedules.models import CustomOnCallShift, OnCallSchedule, OnCallScheduleCalendar, OnCallScheduleWeb
from common.constants.role import Role
@@ -236,6 +237,8 @@ def test_filter_events_ical_all_day(make_organization, make_user_for_organizatio
schedule.cached_ical_file_primary = calendar.to_ical()
for u in ("@Bernard Desruisseaux", "@Bob", "@Alex"):
make_user_for_organization(organization, username=u)
+ # clear users pks <-> organization cache (persisting between tests)
+ memoized_users_in_ical.cache_clear()
day_to_check_iso = "2021-01-27T15:27:14.448059+00:00"
parsed_iso_day_to_check = datetime.datetime.fromisoformat(day_to_check_iso).replace(tzinfo=pytz.UTC)
@@ -268,6 +271,8 @@ def test_final_schedule_events(make_organization, make_user_for_organization, ma
start_date = now - timezone.timedelta(days=7)
user_a, user_b, user_c, user_d, user_e = (make_user_for_organization(organization, username=i) for i in "ABCDE")
+ # clear users pks <-> organization cache (persisting between tests)
+ memoized_users_in_ical.cache_clear()
shifts = (
# user, priority, start time (h), duration (hs)
@@ -367,6 +372,8 @@ def test_final_schedule_splitting_events(
start_date = now - timezone.timedelta(days=7)
user_a, user_b, user_c = (make_user_for_organization(organization, username=i) for i in "ABC")
+ # clear users pks <-> organization cache (persisting between tests)
+ memoized_users_in_ical.cache_clear()
shifts = (
# user, priority, start time (h), duration (hs)
@@ -434,6 +441,8 @@ def test_final_schedule_splitting_same_time_events(
start_date = now - timezone.timedelta(days=7)
user_a, user_b, user_c = (make_user_for_organization(organization, username=i) for i in "ABC")
+ # clear users pks <-> organization cache (persisting between tests)
+ memoized_users_in_ical.cache_clear()
shifts = (
# user, priority, start time (h), duration (hs)
@@ -742,6 +751,19 @@ def test_preview_override_shift(make_organization, make_user_for_organization, m
assert schedule._ical_file_overrides == schedule_overrides_ical
+@pytest.mark.django_db
+def test_schedule_related_users_empty_schedule(make_organization, make_schedule):
+ organization = make_organization()
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleWeb,
+ name="test_web_schedule",
+ )
+
+ users = schedule.related_users()
+ assert users == set()
+
+
@pytest.mark.django_db
def test_schedule_related_users(make_organization, make_user_for_organization, make_on_call_shift, make_schedule):
organization = make_organization()
@@ -755,6 +777,8 @@ def test_schedule_related_users(make_organization, make_user_for_organization, m
start_date = now - timezone.timedelta(days=7)
user_a, _, _, user_d, user_e = (make_user_for_organization(organization, username=i) for i in "ABCDE")
+ # clear users pks <-> organization cache (persisting between tests)
+ memoized_users_in_ical.cache_clear()
shifts = (
# user, priority, start time (h), duration (hs)
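memoized_users_in_ical is presumably an lru_cache-style memoized lookup of users per organization, so results persist between tests unless the cache is cleared explicitly, as the calls added above do. The pattern in miniature:

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def lookup(username):
        return username.upper()  # stand-in for a database hit

    lookup("alice")        # result is now cached
    lookup.cache_clear()   # what the tests call on memoized_users_in_ical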
diff --git a/engine/apps/slack/scenarios/alertgroup_appearance.py b/engine/apps/slack/scenarios/alertgroup_appearance.py
index 7b772a0e..8a335fcd 100644
--- a/engine/apps/slack/scenarios/alertgroup_appearance.py
+++ b/engine/apps/slack/scenarios/alertgroup_appearance.py
@@ -57,7 +57,7 @@ class OpenAlertAppearanceDialogStep(
# This is a special case for amazon sns notifications in str format CHECKED
if (
- AlertReceiveChannel.INTEGRATION_AMAZON_SNS is not None
+ hasattr(AlertReceiveChannel, "INTEGRATION_AMAZON_SNS")
and alert_group.channel.integration == AlertReceiveChannel.INTEGRATION_AMAZON_SNS
and raw_request_data == "{}"
):
diff --git a/engine/apps/slack/scenarios/distribute_alerts.py b/engine/apps/slack/scenarios/distribute_alerts.py
index 109577b1..91814f6d 100644
--- a/engine/apps/slack/scenarios/distribute_alerts.py
+++ b/engine/apps/slack/scenarios/distribute_alerts.py
@@ -354,9 +354,9 @@ class SelectAttachGroupStep(
f"attached incidents ({attached_incidents.count()}):\n"
)
for dependent_alert in attached_incidents:
- if dependent_alert.permalink:
+ if dependent_alert.slack_permalink:
dependent_alert_text = (
- f"\n<{dependent_alert.permalink}|{dependent_alert.long_verbose_name_without_formatting}>"
+ f"\n<{dependent_alert.slack_permalink}|{dependent_alert.long_verbose_name_without_formatting}>"
)
else:
dependent_alert_text = f"\n{dependent_alert.long_verbose_name}"
diff --git a/engine/apps/slack/scenarios/resolution_note.py b/engine/apps/slack/scenarios/resolution_note.py
index cec692c6..c22a14ad 100644
--- a/engine/apps/slack/scenarios/resolution_note.py
+++ b/engine/apps/slack/scenarios/resolution_note.py
@@ -7,6 +7,7 @@ from django.utils import timezone
from apps.slack.scenarios import scenario_step
from apps.slack.slack_client.exceptions import SlackAPIException
+from apps.user_management.models import User
from common.api_helpers.utils import create_engine_url
from .step_mixins import CheckAlertIsUnarchivedMixin
@@ -107,10 +108,18 @@ class AddToResolutionNoteStep(CheckAlertIsUnarchivedMixin, scenario_step.Scenari
channel_id=channel_id,
)
alert_group = slack_message.get_alert_group()
- author_slack_user_identity = SlackUserIdentity.objects.get(
- slack_id=payload["message"]["user"], slack_team_identity=slack_team_identity
- )
- author_user = self.organization.users.get(slack_user_identity=author_slack_user_identity)
+ try:
+ author_slack_user_identity = SlackUserIdentity.objects.get(
+ slack_id=payload["message"]["user"], slack_team_identity=slack_team_identity
+ )
+ author_user = self.organization.users.get(slack_user_identity=author_slack_user_identity)
+ except (SlackUserIdentity.DoesNotExist, User.DoesNotExist):
+ warning_text = (
+ "Unable to add this message to resolution note: could not find corresponding "
+ "OnCall user for message author: {}".format(payload["message"]["user"])
+ )
+ self.open_warning_window(payload, warning_text)
+ return
resolution_note_slack_message = ResolutionNoteSlackMessage(
alert_group=alert_group,
user=author_user,
@@ -121,6 +130,7 @@ class AddToResolutionNoteStep(CheckAlertIsUnarchivedMixin, scenario_step.Scenari
ts=message_ts,
permalink=permalink,
)
+
resolution_note_slack_message.added_to_resolution_note = True
resolution_note_slack_message.save()
resolution_note = resolution_note_slack_message.get_resolution_note()
diff --git a/engine/common/api_helpers/paginators.py b/engine/common/api_helpers/paginators.py
index 01ce2cc6..2a3ad974 100644
--- a/engine/common/api_helpers/paginators.py
+++ b/engine/common/api_helpers/paginators.py
@@ -1,19 +1,33 @@
from rest_framework.pagination import CursorPagination, PageNumberPagination
+from common.api_helpers.utils import create_engine_url
-class HundredPageSizePaginator(PageNumberPagination):
+
+class PathPrefixedPagination(PageNumberPagination):
+ def paginate_queryset(self, queryset, request, view=None):
+ request.build_absolute_uri = lambda: create_engine_url(request.get_full_path())
+ return super().paginate_queryset(queryset, request, view)
+
+
+class PathPrefixedCursorPagination(CursorPagination):
+ def paginate_queryset(self, queryset, request, view=None):
+ request.build_absolute_uri = lambda: create_engine_url(request.get_full_path())
+ return super().paginate_queryset(queryset, request, view)
+
+
+class HundredPageSizePaginator(PathPrefixedPagination):
page_size = 100
-class FiftyPageSizePaginator(PageNumberPagination):
+class FiftyPageSizePaginator(PathPrefixedPagination):
page_size = 50
-class TwentyFivePageSizePaginator(PageNumberPagination):
+class TwentyFivePageSizePaginator(PathPrefixedPagination):
page_size = 25
-class TwentyFiveCursorPaginator(CursorPagination):
+class TwentyFiveCursorPaginator(PathPrefixedCursorPagination):
page_size = 25
max_page_size = 100
page_size_query_param = "perpage"
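These overrides make DRF build next/previous pagination links from the engine's configured base URL (which may carry a path prefix) instead of the raw request host. A sketch of the behaviour create_engine_url is expected to provide; its real implementation lives in common/api_helpers/utils.py and is not shown in this diff:

    from urllib.parse import urljoin

    # Assumed behaviour, for illustration only.
    def create_engine_url(path, base="https://test.com/test/prefixed/urls/"):
        return urljoin(base, path.lstrip("/"))

    assert create_engine_url("/api/internal/v1/alertgroups/?page=2").startswith(
        "https://test.com/test/prefixed/urls"
    )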
diff --git a/engine/config_integrations/alertmanager.py b/engine/config_integrations/alertmanager.py
index bfdcff2e..cc356e26 100644
--- a/engine/config_integrations/alertmanager.py
+++ b/engine/config_integrations/alertmanager.py
@@ -9,7 +9,7 @@ is_able_to_autoresolve = True
is_demo_alert_enabled = True
description = """
-Alerts from Grafana Alertmanager are automatically routed to this integration."
+Alerts from Grafana Alertmanager are automatically routed to this integration.
{% for dict_item in grafana_alerting_entities %}
Click here
to open contact point, and
diff --git a/engine/config_integrations/grafana_alerting.py b/engine/config_integrations/grafana_alerting.py
index e8942b1e..4eac0135 100644
--- a/engine/config_integrations/grafana_alerting.py
+++ b/engine/config_integrations/grafana_alerting.py
@@ -12,7 +12,7 @@ is_able_to_autoresolve = True
is_demo_alert_enabled = True
description = """ \
-Alerts from Grafana Alertmanager are automatically routed to this integration."
+Alerts from Grafana Alertmanager are automatically routed to this integration.
{% for dict_item in grafana_alerting_entities %}
Click here
to open contact point, and
diff --git a/engine/requirements.txt b/engine/requirements.txt
index 1bf66e51..950c1d1e 100644
--- a/engine/requirements.txt
+++ b/engine/requirements.txt
@@ -5,8 +5,8 @@ whitenoise==5.3.0
twilio~=6.37.0
phonenumbers==8.10.0
django-ordered-model==3.1.1
-celery==5.2.7
-redis==3.2.0
+celery[amqp,redis]==5.2.7
+redis==3.4.1
humanize==0.5.1
uwsgi==2.0.20
django-cors-headers==3.7.0
@@ -24,7 +24,7 @@ slack-export-viewer==1.0.0
beautifulsoup4==4.8.1
social-auth-app-django==3.1.0
sendgrid==6.1.2
-cryptography==3.2
+cryptography==3.3.2
pytest==5.4.3
pytest-django==3.9.0
pytest_factoryboy==2.0.3
diff --git a/engine/settings/base.py b/engine/settings/base.py
index 3f893246..d9ec9f36 100644
--- a/engine/settings/base.py
+++ b/engine/settings/base.py
@@ -64,9 +64,6 @@ TWILIO_VERIFY_SERVICE_SID = os.environ.get("TWILIO_VERIFY_SERVICE_SID")
TELEGRAM_WEBHOOK_HOST = os.environ.get("TELEGRAM_WEBHOOK_HOST", BASE_URL)
TELEGRAM_TOKEN = os.environ.get("TELEGRAM_TOKEN")
-os.environ.setdefault("MYSQL_PASSWORD", "empty")
-os.environ.setdefault("RABBIT_URI", "empty")
-
# For Sending email
SENDGRID_API_KEY = os.environ.get("SENDGRID_API_KEY")
SENDGRID_FROM_EMAIL = os.environ.get("SENDGRID_FROM_EMAIL")
@@ -84,21 +81,101 @@ GRAFANA_CLOUD_ONCALL_TOKEN = os.environ.get("GRAFANA_CLOUD_ONCALL_TOKEN", None)
# Outgoing webhook settings
DANGEROUS_WEBHOOKS_ENABLED = getenv_boolean("DANGEROUS_WEBHOOKS_ENABLED", default=False)
-# DB backend defaults
-DB_BACKEND = os.environ.get("DB_BACKEND", "mysql")
-DB_BACKEND_DEFAULT_VALUES = {
- "mysql": {
+
+# Database
+class DatabaseTypes:
+ MYSQL = "mysql"
+ POSTGRESQL = "postgresql"
+ SQLITE3 = "sqlite3"
+
+
+DATABASE_DEFAULTS = {
+ DatabaseTypes.MYSQL: {
"USER": "root",
- "PORT": "3306",
+ "PORT": 3306,
+ },
+ DatabaseTypes.POSTGRESQL: {
+ "USER": "postgres",
+ "PORT": 5432,
+ },
+}
+
+DATABASE_NAME = os.getenv("DATABASE_NAME") or os.getenv("MYSQL_DB_NAME")
+DATABASE_USER = os.getenv("DATABASE_USER") or os.getenv("MYSQL_USER")
+DATABASE_PASSWORD = os.getenv("DATABASE_PASSWORD") or os.getenv("MYSQL_PASSWORD")
+DATABASE_HOST = os.getenv("DATABASE_HOST") or os.getenv("MYSQL_HOST")
+DATABASE_PORT = os.getenv("DATABASE_PORT") or os.getenv("MYSQL_PORT")
+
+DATABASE_TYPE = os.getenv("DATABASE_TYPE", DatabaseTypes.MYSQL).lower()
+assert DATABASE_TYPE in {DatabaseTypes.MYSQL, DatabaseTypes.POSTGRESQL, DatabaseTypes.SQLITE3}
+
+DATABASE_ENGINE = f"django.db.backends.{DATABASE_TYPE}"
+
+DATABASE_CONFIGS = {
+ DatabaseTypes.SQLITE3: {
+ "ENGINE": DATABASE_ENGINE,
+ "NAME": DATABASE_NAME or "/var/lib/oncall/oncall.db",
+ },
+ DatabaseTypes.MYSQL: {
+ "ENGINE": DATABASE_ENGINE,
+ "NAME": DATABASE_NAME,
+ "USER": DATABASE_USER,
+ "PASSWORD": DATABASE_PASSWORD,
+ "HOST": DATABASE_HOST,
+ "PORT": DATABASE_PORT,
"OPTIONS": {
"charset": "utf8mb4",
"connect_timeout": 1,
},
},
- "postgresql": {
- "USER": "postgres",
- "PORT": "5432",
- "OPTIONS": {},
+ DatabaseTypes.POSTGRESQL: {
+ "ENGINE": DATABASE_ENGINE,
+ "NAME": DATABASE_NAME,
+ "USER": DATABASE_USER,
+ "PASSWORD": DATABASE_PASSWORD,
+ "HOST": DATABASE_HOST,
+ "PORT": DATABASE_PORT,
+ },
+}
+
+DATABASES = {
+ "default": DATABASE_CONFIGS[DATABASE_TYPE],
+}
+if DATABASE_TYPE == DatabaseTypes.MYSQL:
+ # Workaround to use pymysql instead of mysqlclient
+ import pymysql
+
+ pymysql.install_as_MySQLdb()
+
+# Redis
+REDIS_USERNAME = os.getenv("REDIS_USERNAME", "")
+REDIS_PASSWORD = os.getenv("REDIS_PASSWORD")
+REDIS_HOST = os.getenv("REDIS_HOST")
+REDIS_PORT = os.getenv("REDIS_PORT", 6379)
+REDIS_PROTOCOL = os.getenv("REDIS_PROTOCOL", "redis")
+
+REDIS_URI = os.getenv("REDIS_URI")
+if not REDIS_URI:
+ REDIS_URI = f"{REDIS_PROTOCOL}://{REDIS_USERNAME}:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}"
+
+# Cache
+CACHES = {
+ "default": {
+ "BACKEND": "redis_cache.RedisCache",
+ "LOCATION": [
+ REDIS_URI,
+ ],
+ "OPTIONS": {
+ "DB": 1,
+ "PARSER_CLASS": "redis.connection.HiredisParser",
+ "CONNECTION_POOL_CLASS": "redis.BlockingConnectionPool",
+ "CONNECTION_POOL_CLASS_KWARGS": {
+ "max_connections": 50,
+ "timeout": 20,
+ },
+ "MAX_CONNECTIONS": 1000,
+ "PICKLE_VERSION": -1,
+ },
},
}
@@ -261,7 +338,34 @@ USE_TZ = True
STATIC_URL = os.environ.get("STATIC_URL", "/static/")
STATIC_ROOT = "./static/"
-CELERY_BROKER_URL = "amqp://rabbitmq:rabbitmq@localhost:5672"
+# RabbitMQ
+RABBITMQ_USERNAME = os.getenv("RABBITMQ_USERNAME")
+RABBITMQ_PASSWORD = os.getenv("RABBITMQ_PASSWORD")
+RABBITMQ_HOST = os.getenv("RABBITMQ_HOST")
+RABBITMQ_PORT = os.getenv("RABBITMQ_PORT", 5672)
+RABBITMQ_PROTOCOL = os.getenv("RABBITMQ_PROTOCOL", "amqp")
+RABBITMQ_VHOST = os.getenv("RABBITMQ_VHOST", "")
+
+RABBITMQ_URI = os.getenv("RABBITMQ_URI") or os.getenv("RABBIT_URI")
+if not RABBITMQ_URI:
+ RABBITMQ_URI = f"{RABBITMQ_PROTOCOL}://{RABBITMQ_USERNAME}:{RABBITMQ_PASSWORD}@{RABBITMQ_HOST}:{RABBITMQ_PORT}/{RABBITMQ_VHOST}"
+
+
+# Celery
+class BrokerTypes:
+ RABBITMQ = "rabbitmq"
+ REDIS = "redis"
+
+
+BROKER_TYPE = os.getenv("BROKER_TYPE", BrokerTypes.RABBITMQ).lower()
+assert BROKER_TYPE in {BrokerTypes.RABBITMQ, BrokerTypes.REDIS}
+
+if BROKER_TYPE == BrokerTypes.RABBITMQ:
+ CELERY_BROKER_URL = RABBITMQ_URI
+elif BROKER_TYPE == BrokerTypes.REDIS:
+ CELERY_BROKER_URL = REDIS_URI
+else:
+ raise ValueError(f"Invalid BROKER_TYPE env variable: {BROKER_TYPE}")
# By default, apply_async will just hang indefinitely trying to reach to RabbitMQ even if RabbitMQ is down.
# This makes apply_async retry 3 times trying to reach to RabbitMQ, with some extra info on periods between retries.
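With these settings, a SQLite + Redis setup can be selected with just DATABASE_TYPE, BROKER_TYPE and REDIS_URI; the remaining values fall back to the defaults above. A small sketch of how the Redis URI resolution behaves (values are illustrative, not shipped defaults):

    import os

    os.environ.update({
        "DATABASE_TYPE": "sqlite3",
        "BROKER_TYPE": "redis",
        "REDIS_URI": "redis://localhost:6379",
    })

    # Mirrors the order used in settings/base.py: an explicit URI wins,
    # otherwise it is assembled from the individual REDIS_* variables.
    redis_uri = os.getenv("REDIS_URI") or (
        f"{os.getenv('REDIS_PROTOCOL', 'redis')}://{os.getenv('REDIS_USERNAME', '')}:"
        f"{os.getenv('REDIS_PASSWORD')}@{os.getenv('REDIS_HOST')}:{os.getenv('REDIS_PORT', 6379)}"
    )
    assert redis_uri == "redis://localhost:6379"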
diff --git a/engine/settings/ci-test.py b/engine/settings/ci-test.py
index b3d39d4e..7af883d3 100644
--- a/engine/settings/ci-test.py
+++ b/engine/settings/ci-test.py
@@ -1,6 +1,6 @@
-# flake8: noqa: F405
+# flake8: noqa
-from .base import * # noqa
+from .base import *
SECRET_KEY = "u5/IIbuiJR3Y9FQMBActk+btReZ5oOxu+l8MIJQWLfVzESoan5REE6UNSYYEQdjBOcty9CDak2X"
@@ -9,27 +9,29 @@ MIRAGE_CIPHER_IV = "X+VFcDqtxJ5bbU+V"
BASE_URL = "http://localhost"
-CELERY_BROKER_URL = "amqp://rabbitmq:rabbitmq@rabbit_test:5672"
+if DATABASE_TYPE == DatabaseTypes.SQLITE3:
+ DATABASES["default"]["NAME"] = DATABASE_NAME or "oncall_ci.db"
+else:
+ DATABASES["default"] |= {
+ "NAME": DATABASE_NAME or "oncall_local_dev",
+ "USER": DATABASE_USER or DATABASE_DEFAULTS[DATABASE_TYPE]["USER"],
+ "PASSWORD": DATABASE_PASSWORD or "local_dev_pwd",
+ "HOST": DATABASE_HOST or f"{DATABASE_TYPE}_test",
+ "PORT": DATABASE_PORT or DATABASE_DEFAULTS[DATABASE_TYPE]["PORT"],
+ }
-if DB_BACKEND == "mysql":
- # Workaround to use pymysql instead of mysqlclient
- import pymysql
+if BROKER_TYPE == BrokerTypes.RABBITMQ:
+ CELERY_BROKER_URL = "amqp://rabbitmq:rabbitmq@rabbit_test:5672"
+elif BROKER_TYPE == BrokerTypes.REDIS:
+ CELERY_BROKER_URL = REDIS_URI
- pymysql.install_as_MySQLdb()
- DB_BACKEND_DEFAULT_VALUES[DB_BACKEND]["OPTIONS"] = {"charset": "utf8mb4"}
-
-
-DATABASES = {
- "default": {
- "ENGINE": "django.db.backends.{}".format(DB_BACKEND),
- "NAME": os.environ.get("DB_NAME", "oncall_local_dev"),
- "USER": os.environ.get("DB_USER", DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("USER", "root")),
- "PASSWORD": "local_dev_pwd",
- "HOST": "{}_test".format(DB_BACKEND),
- "PORT": os.environ.get("DB_PORT", DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("PORT", "3306")),
- "OPTIONS": DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("OPTIONS", {}),
- },
-}
+# fall back to a local memory cache when redis is not the celery broker in CI tests
+if BROKER_TYPE != BrokerTypes.REDIS:
+ CACHES = {
+ "default": {
+ "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
+ }
+ }
# Dummy Telegram token (fake one)
TELEGRAM_TOKEN = "0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX"
diff --git a/engine/settings/dev.py b/engine/settings/dev.py
index fb7ddc3e..9c418ae4 100644
--- a/engine/settings/dev.py
+++ b/engine/settings/dev.py
@@ -1,27 +1,28 @@
+# flake8: noqa
import os
import sys
-from .base import * # noqa
-
-if DB_BACKEND == "mysql": # noqa
- # Workaround to use pymysql instead of mysqlclient
- import pymysql
-
- pymysql.install_as_MySQLdb()
+from .base import *
DEBUG = True
-DATABASES = {
- "default": {
- "ENGINE": "django.db.backends.{}".format(DB_BACKEND), # noqa
- "NAME": os.environ.get("DB_NAME", "oncall_local_dev"),
- "USER": os.environ.get("DB_USER", DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("USER", "root")), # noqa
- "PASSWORD": os.environ.get("DB_PASSWORD", "empty"),
- "HOST": os.environ.get("DB_HOST", "127.0.0.1"),
- "PORT": os.environ.get("DB_PORT", DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("PORT", "3306")), # noqa
- "OPTIONS": DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("OPTIONS", {}), # noqa
- },
-}
+if DATABASE_TYPE == DatabaseTypes.SQLITE3:
+ DATABASES["default"]["NAME"] = DATABASE_NAME or "oncall_dev.db"
+else:
+ DATABASES["default"] |= {
+ "NAME": DATABASE_NAME or "oncall_local_dev",
+ "USER": DATABASE_USER or DATABASE_DEFAULTS[DATABASE_TYPE]["USER"],
+ "PASSWORD": DATABASE_PASSWORD or "empty",
+ "HOST": DATABASE_HOST or "127.0.0.1",
+ "PORT": DATABASE_PORT or DATABASE_DEFAULTS[DATABASE_TYPE]["PORT"],
+ }
+
+if BROKER_TYPE == BrokerTypes.RABBITMQ:
+ CELERY_BROKER_URL = "pyamqp://rabbitmq:rabbitmq@localhost:5672"
+elif BROKER_TYPE == BrokerTypes.REDIS:
+ CELERY_BROKER_URL = "redis://localhost:6379"
+
+CACHES["default"]["LOCATION"] = ["localhost:6379"]
SECRET_KEY = os.environ.get("SECRET_KEY", "osMsNM0PqlRHBlUvqmeJ7+ldU3IUETCrY9TrmiViaSmInBHolr1WUlS0OFS4AHrnnkp1vp9S9z1")
@@ -32,28 +33,6 @@ MIRAGE_CIPHER_IV = os.environ.get("MIRAGE_CIPHER_IV", "tZZa+60zTZO2NRcS")
TESTING = "pytest" in sys.modules or "unittest" in sys.modules
-CACHES = {
- "default": {
- "BACKEND": "redis_cache.RedisCache",
- "LOCATION": [
- "localhost:6379",
- ],
- "OPTIONS": {
- "DB": 1,
- "PARSER_CLASS": "redis.connection.HiredisParser",
- "CONNECTION_POOL_CLASS": "redis.BlockingConnectionPool",
- "CONNECTION_POOL_CLASS_KWARGS": {
- "max_connections": 50,
- "timeout": 20,
- },
- "MAX_CONNECTIONS": 1000,
- "PICKLE_VERSION": -1,
- },
- },
-}
-
-CELERY_BROKER_URL = "pyamqp://rabbitmq:rabbitmq@localhost:5672"
-
SILKY_PYTHON_PROFILER = True
# For any requests that come in with that header/value, request.is_secure() will return True.
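The DATABASES["default"] |= {...} merges above rely on the in-place dict union operator introduced in Python 3.9; on older interpreters the equivalent is dict.update(). A throwaway check of the equivalence:

    defaults = {"ENGINE": "django.db.backends.mysql", "PORT": 3306}
    overrides = {"USER": "postgres", "PORT": 5432}
    defaults.update(overrides)  # same effect as: defaults |= overrides
    assert defaults["PORT"] == 5432 and defaults["USER"] == "postgres"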
diff --git a/engine/settings/helm.py b/engine/settings/helm.py
index 3f9f1a62..6ae28e8a 100644
--- a/engine/settings/helm.py
+++ b/engine/settings/helm.py
@@ -1,62 +1,4 @@
-import os
-
-# Workaround to use pymysql instead of mysqlclient
-import pymysql
-
-from .prod_without_db import * # noqa
-
-pymysql.install_as_MySQLdb()
-
-DATABASES = {
- "default": {
- "ENGINE": "django.db.backends.mysql",
- "NAME": os.environ.get("MYSQL_DB_NAME"),
- "USER": os.environ.get("MYSQL_USER"),
- "PASSWORD": os.environ["MYSQL_PASSWORD"],
- "HOST": os.environ.get("MYSQL_HOST"),
- "PORT": os.environ.get("MYSQL_PORT"),
- "OPTIONS": {
- "charset": "utf8mb4",
- "connect_timeout": 1,
- },
- },
-}
-
-RABBITMQ_USERNAME = os.environ.get("RABBITMQ_USERNAME")
-RABBITMQ_PASSWORD = os.environ.get("RABBITMQ_PASSWORD")
-RABBITMQ_HOST = os.environ.get("RABBITMQ_HOST")
-RABBITMQ_PORT = os.environ.get("RABBITMQ_PORT")
-RABBITMQ_PROTOCOL = os.environ.get("RABBITMQ_PROTOCOL")
-RABBITMQ_VHOST = os.environ.get("RABBITMQ_VHOST", "")
-
-CELERY_BROKER_URL = (
- f"{RABBITMQ_PROTOCOL}://{RABBITMQ_USERNAME}:{RABBITMQ_PASSWORD}@{RABBITMQ_HOST}:{RABBITMQ_PORT}/{RABBITMQ_VHOST}"
-)
-
-REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD")
-REDIS_HOST = os.environ.get("REDIS_HOST")
-REDIS_PORT = os.environ.get("REDIS_PORT", "6379")
-REDIS_URI = f"redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}"
-
-CACHES = {
- "default": {
- "BACKEND": "redis_cache.RedisCache",
- "LOCATION": [
- REDIS_URI,
- ],
- "OPTIONS": {
- "DB": 1,
- "PARSER_CLASS": "redis.connection.HiredisParser",
- "CONNECTION_POOL_CLASS": "redis.BlockingConnectionPool",
- "CONNECTION_POOL_CLASS_KWARGS": {
- "max_connections": 50,
- "timeout": 20,
- },
- "MAX_CONNECTIONS": 1000,
- "PICKLE_VERSION": -1,
- },
- },
-}
+from .prod_without_db import * # noqa: F401, F403
APPEND_SLASH = False
SECURE_SSL_REDIRECT = False
diff --git a/engine/settings/hobby.py b/engine/settings/hobby.py
index 3bd73c13..ca7299b0 100644
--- a/engine/settings/hobby.py
+++ b/engine/settings/hobby.py
@@ -1,37 +1,6 @@
-# flake8: noqa: F405
+from .prod_without_db import * # noqa: F403
-from random import randrange
-
-# Workaround to use pymysql instead of mysqlclient
-import pymysql
-
-from .prod_without_db import * # noqa
-
-pymysql.install_as_MySQLdb()
-
-DATABASES = {
- "default": {
- "ENGINE": "django.db.backends.mysql",
- "NAME": os.environ.get("MYSQL_DB_NAME"),
- "USER": os.environ.get("MYSQL_USER"),
- "PASSWORD": os.environ["MYSQL_PASSWORD"],
- "HOST": os.environ.get("MYSQL_HOST"),
- "PORT": os.environ.get("MYSQL_PORT"),
- "OPTIONS": {
- "charset": "utf8mb4",
- "connect_timeout": 1,
- },
- },
-}
-
-RABBITMQ_USERNAME = os.environ.get("RABBITMQ_USERNAME")
-RABBITMQ_PASSWORD = os.environ.get("RABBITMQ_PASSWORD")
-RABBITMQ_HOST = os.environ.get("RABBITMQ_HOST")
-RABBITMQ_PORT = os.environ.get("RABBITMQ_PORT")
-
-CELERY_BROKER_URL = f"amqp://{RABBITMQ_USERNAME}:{RABBITMQ_PASSWORD}@{RABBITMQ_HOST}:{RABBITMQ_PORT}"
-
-MIRAGE_SECRET_KEY = SECRET_KEY
+MIRAGE_SECRET_KEY = SECRET_KEY # noqa: F405
MIRAGE_CIPHER_IV = "1234567890abcdef" # use default
APPEND_SLASH = False
diff --git a/engine/settings/prod_without_db.py b/engine/settings/prod_without_db.py
index 6b7c20d8..0c583483 100644
--- a/engine/settings/prod_without_db.py
+++ b/engine/settings/prod_without_db.py
@@ -15,36 +15,6 @@ except ModuleNotFoundError:
from .base import * # noqa
-# It's required for collectstatic to avoid connecting it to MySQL
-
-# Primary database must have the name "default"
-DATABASES = {
- "default": {
- "ENGINE": "django.db.backends.sqlite3",
- "NAME": os.path.join(BASE_DIR, "db.sqlite3"), # noqa
- }
-}
-
-CACHES = {
- "default": {
- "BACKEND": "redis_cache.RedisCache",
- "LOCATION": [
- os.environ.get("REDIS_URI"),
- ],
- "OPTIONS": {
- "DB": 1,
- "PARSER_CLASS": "redis.connection.HiredisParser",
- "CONNECTION_POOL_CLASS": "redis.BlockingConnectionPool",
- "CONNECTION_POOL_CLASS_KWARGS": {
- "max_connections": 50,
- "timeout": 20,
- },
- "MAX_CONNECTIONS": 1000,
- "PICKLE_VERSION": -1,
- },
- },
-}
-
SLACK_SIGNING_SECRET = os.environ.get("SLACK_SIGNING_SECRET")
SLACK_SIGNING_SECRET_LIVE = os.environ.get("SLACK_SIGNING_SECRET_LIVE", "")
@@ -56,8 +26,6 @@ STATIC_ROOT = "./collected_static/"
DEBUG = False
-CELERY_BROKER_URL = os.environ["RABBIT_URI"]
-
SECURE_SSL_REDIRECT = True
SECURE_REDIRECT_EXEMPT = [
"^health/",
diff --git a/grafana-plugin/CHANGELOG.md b/grafana-plugin/CHANGELOG.md
index ffaeb731..ebfa6fb2 100644
--- a/grafana-plugin/CHANGELOG.md
+++ b/grafana-plugin/CHANGELOG.md
@@ -1,67 +1,110 @@
# Change Log
+## v1.0.39 (2022-10-03)
+
+- Fix issue in v1.0.38 blocking the creation of schedules and webhooks in the UI
+
+## v1.0.38 (2022-09-30)
+
+- Fix exception handling for adding resolution notes when Slack and OnCall users are out of sync.
+- Fix all-day events showing as having gaps in Slack notifications
+- Improve plugin configuration error message readability
+- Add `telegram` key to `permalinks` property in `AlertGroup` public API response schema
+
+## v1.0.37 (2022-09-21)
+
+- Improve API token creation form
+- Fix alert group bulk action bugs
+- Add `permalinks` property to `AlertGroup` public API response schema
+- Scheduling system bug fixes
+- Public API bug fixes
+
+## v1.0.36 (2022-09-12)
+
+- Alpha web schedules frontend/backend updates
+- Bug fixes
+
## v1.0.35 (2022-09-07)
+
- Bug fixes
## v1.0.34 (2022-09-06)
+
- Fix schedule notification spam
## v1.0.33 (2022-09-06)
+
- Add raw alert view
- Add GitHub star button for OSS installations
- Restore alert group search functionality
- Bug fixes
## v1.0.32 (2022-09-01)
+
- Bug fixes
## v1.0.31 (2022-09-01)
+
- Bump celery version
- Fix oss to cloud connection
## v1.0.30 (2022-08-31)
+
- Bug fix: check user notification policy before access
## v1.0.29 (2022-08-31)
+
- Add arm64 docker image
## v1.0.28 (2022-08-31)
+
- Bug fixes
## v1.0.27 (2022-08-30)
+
- Bug fixes
## v1.0.26 (2022-08-26)
+
- Insight log's format fixes
- Remove UserNotificationPolicy auto-recreating
## v1.0.25 (2022-08-24)
+
- Bug fixes
## v1.0.24 (2022-08-24)
+
- Insight logs
- Default DATA_UPLOAD_MAX_MEMORY_SIZE to 1mb
## v1.0.23 (2022-08-23)
+
- Bug fixes
## v1.0.22 (2022-08-16)
+
- Make STATIC_URL configurable from environment variable
## v1.0.21 (2022-08-12)
+
- Bug fixes
## v1.0.19 (2022-08-10)
+
- Bug fixes
## v1.0.15 (2022-08-03)
+
- Bug fixes
## v1.0.13 (2022-07-27)
+
- Optimize alert group list view
- Fix a bug related to Twilio setup
## v1.0.12 (2022-07-26)
+
- Update push-notifications dependency
- Rework how absolute URLs are built
- Fix to show maintenance windows per team
@@ -69,15 +112,18 @@
- Internal api to get a schedule final events
## v1.0.10 (2022-07-22)
+
- Speed-up of alert group web caching
- Internal api for OnCall shifts
## v1.0.9 (2022-07-21)
+
- Frontend bug fixes & improvements
- Support regex_replace() in templates
- Bring back alert group caching and list view
## v1.0.7 (2022-07-18)
+
- Backend & frontend bug fixes
- Deployment improvements
- Reshape webhook payload for outgoing webhooks
@@ -85,18 +131,22 @@
- Improve alert group list load speeds and simplify caching system
## v1.0.6 (2022-07-12)
+
- Manual Incidents enabled for teams
- Fix phone notifications for OSS
- Public API improvements
## v1.0.5 (2022-07-06)
+
- Bump Django to 3.2.14
- Fix PagerDuty iCal parsing
## 1.0.4 (2022-06-28)
+
- Allow Telegram DMs without channel connection.
## 1.0.3 (2022-06-27)
+
- Fix users public api endpoint. Now it returns users with all roles.
- Fix redundant notifications about gaps in schedules.
- Frontend fixes.
diff --git a/grafana-plugin/src/GrafanaPluginRootPage.tsx b/grafana-plugin/src/GrafanaPluginRootPage.tsx
index 286bc509..4e3f6467 100644
--- a/grafana-plugin/src/GrafanaPluginRootPage.tsx
+++ b/grafana-plugin/src/GrafanaPluginRootPage.tsx
@@ -154,7 +154,7 @@ export const Root = observer((props: AppRootProps) => {
return (