commit f7c9103eb3
35 changed files with 2363 additions and 490 deletions

CHANGELOG.md (20 lines changed)
@@ -5,6 +5,26 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## v1.3.24 (2023-08-17)

### Added

- Shift swap requests public API ([#2775](https://github.com/grafana/oncall/pull/2775))
- Shift swap request Slack follow-ups by @vadimkerr ([#2798](https://github.com/grafana/oncall/pull/2798))
- Shift swap request push notification follow-ups by @vadimkerr ([#2805](https://github.com/grafana/oncall/pull/2805))

### Changed

- Improve default AlertManager template ([#2794](https://github.com/grafana/oncall/pull/2794))

### Fixed

- Ignore ical cancelled events when calculating shifts ([#2776](https://github.com/grafana/oncall/pull/2776))
- Fix Slack acknowledgment reminders by @vadimkerr ([#2769](https://github.com/grafana/oncall/pull/2769))
- Fix issue with updating "Require resolution note" setting by @Ferril ([#2782](https://github.com/grafana/oncall/pull/2782))
- Don't send notifications about past SSRs when turning on info notifications by @vadimkerr ([#2783](https://github.com/grafana/oncall/pull/2783))
- Add schedule shift type validation on create/preview ([#2789](https://github.com/grafana/oncall/pull/2789))

## v1.3.23 (2023-08-10)

### Added
@@ -336,6 +336,7 @@ services:
required: false
profiles:
- grafana

volumes:
redisdata_dev:
labels: *oncall-labels
docs/sources/oncall-api-reference/shift_swaps.md (new file, 344 lines)
@@ -0,0 +1,344 @@
---
canonical: https://grafana.com/docs/oncall/latest/oncall-api-reference/shift_swaps/
title: Shift swaps HTTP API
weight: 1200
---

# Create a shift swap request

```shell
curl "{{API_URL}}/api/v1/shift_swaps/" \
--request POST \
--header "Authorization: meowmeowmeow" \
--header "Content-Type: application/json" \
--data '{
"schedule": "SRJWJCMKD68AL",
"swap_start": "2026-06-11T00:00:00Z",
"swap_end": "2026-07-19T22:00:00Z",
"description": "Anyone to cover my shifts?",
"beneficiary": "UWJWIN8MQ1GYL"
}'
```

The above command returns JSON structured in the following way:

```json
{
"benefactor" : null,
"beneficiary" : "UWJWIN8MQ1GYL",
"created_at" : "2023-08-11T19:20:17.064677Z",
"description" : "Anyone to cover my shifts?",
"id" : "SSRG1TDNBMJQ1NC",
"schedule" : "SRJWJCMKD68AL",
"shifts" : [
{
"all_day" : false,
"calendar_type" : 0,
"end" : "2026-06-11T03:00:00Z",
"is_empty" : false,
"is_gap" : false,
"is_override" : false,
"missing_users" : [],
"priority_level" : 2,
"shift" : {
"pk" : "OTI13GNNE5V1L"
},
"source" : "web",
"start" : "2026-06-11T00:00:00Z",
"users" : [
{
"avatar_full" : "http://avatar.url",
"display_name" : "testing",
"email" : "testing",
"pk" : "UWJWIN8MQ1GYL",
"swap_request" : {
"pk" : "SSRG1TDNBMJQ1NC"
}
}
]
}
],
"status" : "open",
"swap_end" : "2026-07-19T22:00:00.000000Z",
"swap_start" : "2026-06-11T00:00:00.000000Z",
"updated_at" : "2023-08-11T19:20:17.064922Z"
}
```
| Parameter     | Unique | Required | Description                                                                           |
| ------------- | :----: | :------: | ------------------------------------------------------------------------------------- |
| `schedule`    | No     | Yes      | ID of the schedule.                                                                   |
| `swap_start`  | No     | Yes      | Start date/time for the swap request. Must be an ISO 8601 formatted datetime string.  |
| `swap_end`    | No     | No       | End date/time for the swap request. Must be an ISO 8601 formatted datetime string.    |
| `description` | No     | No       | A description message to be displayed along with the request.                         |
| `beneficiary` | No     | Yes      | ID of the user requesting the swap.                                                   |

**HTTP request**

`POST {{API_URL}}/api/v1/shift_swaps/`
# Get a shift swap request

```shell
curl "{{API_URL}}/api/v1/shift_swaps/SSRG1TDNBMJQ1NC/" \
--request GET \
--header "Authorization: meowmeowmeow" \
--header "Content-Type: application/json"
```

The above command returns JSON structured in the following way:

```json
{
"benefactor" : null,
"beneficiary" : "UWJWIN8MQ1GYL",
"created_at" : "2023-08-11T19:20:17.064677Z",
"description" : "Anyone to cover my shifts?",
"id" : "SSRG1TDNBMJQ1NC",
"schedule" : "SRJWJCMKD68AL",
"shifts" : [
{
"all_day" : false,
"calendar_type" : 0,
"end" : "2026-06-11T03:00:00Z",
"is_empty" : false,
"is_gap" : false,
"is_override" : false,
"missing_users" : [],
"priority_level" : 2,
"shift" : {
"pk" : "OTI13GNNE5V1L"
},
"source" : "web",
"start" : "2026-06-11T00:00:00Z",
"users" : [
{
"avatar_full" : "http://avatar.url",
"display_name" : "testing",
"email" : "testing",
"pk" : "UWJWIN8MQ1GYL",
"swap_request" : {
"pk" : "SSRG1TDNBMJQ1NC"
}
}
]
}
],
"status" : "open",
"swap_end" : "2026-07-19T22:00:00.000000Z",
"swap_start" : "2026-06-11T00:00:00.000000Z",
"updated_at" : "2023-08-11T19:20:17.064922Z"
}
```

**HTTP request**

`GET {{API_URL}}/api/v1/shift_swaps/<SHIFT_SWAP_REQUEST_ID>/`

# List shift swap requests

```shell
curl "{{API_URL}}/api/v1/shift_swaps/" \
--request GET \
--header "Authorization: meowmeowmeow" \
--header "Content-Type: application/json"
```

The above command returns JSON structured in the following way:

```json
{
"count" : 2,
"current_page_number" : 1,
"next" : null,
"page_size" : 50,
"previous" : null,
"results" : [
{
"benefactor" : "UWJWIN8MQ1GYL",
"beneficiary" : "UCGEIXI1MR1NZ",
"created_at" : "2023-08-07T18:44:15.249679Z",
"description" : "Taking a few days off.",
"id" : "SSRK2EH2TR6E4F9",
"schedule" : "SRZZFY1QI9FLL",
"status" : "taken",
"swap_end" : "2024-09-29T03:00:18.000000Z",
"swap_start" : "2024-09-26T03:00:18.000000Z",
"updated_at" : "2024-08-07T18:44:15.249960Z"
},
{
"benefactor" : null,
"beneficiary" : "UWJWIN8MQ1GYL",
"created_at" : "2023-08-11T19:20:17.064677Z",
"description" : "Anyone to cover my shifts?",
"id" : "SSRG1TDNBMJQ1NC",
"schedule" : "SRJWJCMKD68AL",
"status" : "open",
"swap_end" : "2026-07-19T22:00:00.000000Z",
"swap_start" : "2026-06-11T00:00:00.000000Z",
"updated_at" : "2023-08-11T19:20:17.064922Z"
}
],
"total_pages" : 1
}
```
The following filter parameters may be provided as `GET` arguments:

- `starting_after` (an ISO 8601 timestamp string; filters requests starting after the specified datetime)
- `schedule_id` (exact match, schedule ID)
- `beneficiary` (exact match, user ID)
- `benefactor` (exact match, user ID)
- `open_only` (set to `true` to filter active untaken requests only)
**HTTP request**

`GET {{API_URL}}/api/v1/shift_swaps/`
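
As a sketch of how the filter parameters above can be combined (the schedule ID is reused from the earlier examples and is illustrative only), listing only the open requests for a single schedule looks like:

```shell
# assumed values: schedule_id reused from the examples above, open_only=true to hide taken requests
curl "{{API_URL}}/api/v1/shift_swaps/?schedule_id=SRJWJCMKD68AL&open_only=true" \
  --request GET \
  --header "Authorization: meowmeowmeow" \
  --header "Content-Type: application/json"
```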

# Update a shift swap request

```shell
curl "{{API_URL}}/api/v1/shift_swaps/SSRG1TDNBMJQ1NC/" \
--request PUT \
--header "Authorization: meowmeowmeow" \
--header "Content-Type: application/json" \
--data '{
"schedule": "SRJWJCMKD68AL",
"swap_start": "2026-06-11T00:00:00Z",
"swap_end": "2026-07-20T22:00:00Z"
}'
```

The above command returns JSON structured in the following way:

```json
{
"benefactor" : null,
"beneficiary" : "UWJWIN8MQ1GYL",
"created_at" : "2023-08-11T19:20:17.064677Z",
"description" : "Anyone to cover my shifts?",
"id" : "SSRG1TDNBMJQ1NC",
"schedule" : "SRJWJCMKD68AL",
"shifts" : [
{
"all_day" : false,
"calendar_type" : 0,
"end" : "2026-06-11T03:00:00Z",
"is_empty" : false,
"is_gap" : false,
"is_override" : false,
"missing_users" : [],
"priority_level" : 2,
"shift" : {
"pk" : "OTI13GNNE5V1L"
},
"source" : "web",
"start" : "2026-06-11T00:00:00Z",
"users" : [
{
"avatar_full" : "http://avatar.url",
"display_name" : "testing",
"email" : "testing",
"pk" : "UWJWIN8MQ1GYL",
"swap_request" : {
"pk" : "SSRG1TDNBMJQ1NC"
}
}
]
}
],
"status" : "open",
"swap_end" : "2026-07-20T22:00:00.000000Z",
"swap_start" : "2026-06-11T00:00:00.000000Z",
"updated_at" : "2023-08-11T19:45:53.096811Z"
}
```

**HTTP request**

`PUT {{API_URL}}/api/v1/shift_swaps/<SHIFT_SWAP_REQUEST_ID>/`

# Delete a shift swap request

```shell
curl "{{API_URL}}/api/v1/shift_swaps/SSRG1TDNBMJQ1NC/" \
--request DELETE \
--header "Authorization: meowmeowmeow" \
--header "Content-Type: application/json"
```

**HTTP request**

`DELETE {{API_URL}}/api/v1/shift_swaps/<SHIFT_SWAP_REQUEST_ID>/`

# Take a shift swap request

```shell
curl "{{API_URL}}/api/v1/shift_swaps/SSRG1TDNBMJQ1NC/take" \
--request POST \
--header "Authorization: meowmeowmeow" \
--header "Content-Type: application/json" \
--data '{
"benefactor": "UCGEIXI1MR1NZ"
}'
```

The above command returns JSON structured in the following way:

```json
{
"benefactor" : "UCGEIXI1MR1NZ",
"beneficiary" : "UWJWIN8MQ1GYL",
"created_at" : "2023-08-11T19:20:17.064677Z",
"description" : "Anyone to cover my shifts?",
"id" : "SSRG1TDNBMJQ1NC",
"schedule" : "SRJWJCMKD68AL",
"shifts" : [
{
"all_day" : false,
"calendar_type" : 0,
"end" : "2026-06-11T03:00:00Z",
"is_empty" : false,
"is_gap" : false,
"is_override" : false,
"missing_users" : [],
"priority_level" : 2,
"shift" : {
"pk" : "OTI13GNNE5V1L"
},
"source" : "web",
"start" : "2026-06-11T00:00:00Z",
"users" : [
{
"avatar_full" : "http://avatar.url",
"display_name" : "anotherone",
"email" : "anotherone",
"pk" : "UCGEIXI1MR1NZ",
"swap_request" : {
"pk" : "SSRG1TDNBMJQ1NC",
"user" : {
"avatar_full" : "http://avatar.url",
"display_name" : "testing",
"email" : "testing",
"pk" : "UWJWIN8MQ1GYL"
}
}
}
]
}
],
"status" : "taken",
"swap_end" : "2026-07-20T22:00:00.000000Z",
"swap_start" : "2026-06-11T00:00:00.000000Z",
"updated_at" : "2023-08-11T19:51:38.622037Z"
}
```

| Parameter    | Unique | Required | Description                     |
| ------------ | :----: | :------: | ------------------------------- |
| `benefactor` | No     | Yes      | ID of the user taking the swap. |

**HTTP request**

`POST {{API_URL}}/api/v1/shift_swaps/<SHIFT_SWAP_REQUEST_ID>/take`
@@ -4,7 +4,6 @@ import typing
import urllib
from collections import namedtuple
from urllib.parse import urljoin
from uuid import UUID, uuid1

from celery import uuid as celery_uuid
from django.conf import settings

@@ -330,9 +329,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
related_name="dependent_alert_groups",
)

# NOTE: we should probably migrate this field to models.UUIDField as it's ONLY ever being
# set to the result of uuid.uuid1
last_unique_unacknowledge_process_id: UUID | None = models.CharField(max_length=100, null=True, default=None)
last_unique_unacknowledge_process_id = models.CharField(max_length=100, null=True, default=None)

wiped_at = models.DateTimeField(null=True, default=None)
wiped_by = models.ForeignKey(

@@ -568,8 +565,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
self._update_metrics(organization_id=user.organization_id, previous_state=initial_state, state=self.state)

self.stop_escalation()
if self.is_root_alert_group:
self.start_ack_reminder(user)
self.start_ack_reminder_if_needed()

log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_ACK, author=user)

@@ -1218,8 +1214,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
state=AlertGroupState.ACKNOWLEDGED,
)

if alert_group.is_root_alert_group:
alert_group.start_ack_reminder(user)
alert_group.start_ack_reminder_if_needed()

log_record = alert_group.log_records.create(type=AlertGroupLogRecord.TYPE_ACK, author=user)
send_alert_group_signal.apply_async((log_record.pk,))

@@ -1599,28 +1594,20 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
AlertGroup._bulk_silence(user, root_alert_groups_to_silence, silence_delay)
AlertGroup._bulk_silence(user, dependent_alert_groups_to_silence, silence_delay)

def start_ack_reminder(self, user: User):
def start_ack_reminder_if_needed(self) -> None:
from apps.user_management.models import Organization

unique_unacknowledge_process_id = uuid1()
logger.info(
f"AlertGroup acknowledged by user with pk "
f"{user.pk}, "
f"acknowledge timeout task has been started with process id {unique_unacknowledge_process_id}"
)
if not self.is_root_alert_group:
return

seconds = Organization.ACKNOWLEDGE_REMIND_DELAY[self.channel.organization.acknowledge_remind_timeout]
if seconds > 0:
delay = datetime.timedelta(seconds=seconds).total_seconds()
acknowledge_reminder_task.apply_async(
(
self.pk,
unique_unacknowledge_process_id,
),
countdown=delay,
)
self.last_unique_unacknowledge_process_id = unique_unacknowledge_process_id
self.save(update_fields=["last_unique_unacknowledge_process_id"])
# Check if the "Remind every N hours" setting is enabled
countdown = Organization.ACKNOWLEDGE_REMIND_DELAY[self.channel.organization.acknowledge_remind_timeout]
if not countdown:
return

self.last_unique_unacknowledge_process_id = celery_uuid()
self.save(update_fields=["last_unique_unacknowledge_process_id"])
acknowledge_reminder_task.apply_async((self.pk, self.last_unique_unacknowledge_process_id), countdown=countdown)

def start_unsilence_task(self, countdown):
task_id = celery_uuid()
@@ -6,122 +6,108 @@ from common.custom_celery_tasks import shared_dedicated_queue_retry_task
from .send_alert_group_signal import send_alert_group_signal
from .task_logger import task_logger

MAX_RETRIES = 1 if settings.DEBUG else None

@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
)
def acknowledge_reminder_task(alert_group_pk, unacknowledge_process_id):

@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=MAX_RETRIES)
def acknowledge_reminder_task(alert_group_pk: int, unacknowledge_process_id: str) -> None:
from apps.alerts.models import AlertGroup, AlertGroupLogRecord
from apps.user_management.models import Organization

log_record = None

task_logger.info(f"Starting a reminder task for acknowledgement timeout with process id {unacknowledge_process_id}")
with transaction.atomic():
try:
alert_group = AlertGroup.objects.filter(pk=alert_group_pk).select_for_update()[0]  # Lock alert_group:
except IndexError:
return f"acknowledge_reminder_task: Alert group with pk {alert_group_pk} doesn't exist"
alert_group = AlertGroup.objects.select_for_update().get(pk=alert_group_pk)  # Lock alert_group
except AlertGroup.DoesNotExist:
task_logger.warning(f"AlertGroup {alert_group_pk} does not exist")
return

if alert_group.last_unique_unacknowledge_process_id == unacknowledge_process_id:
alert_group.acknowledged_by_confirmed = None
alert_group.save(update_fields=["acknowledged_by_confirmed"])
if alert_group.status == AlertGroup.ACKNOWLEDGED and alert_group.is_root_alert_group:
if alert_group.acknowledged and alert_group.acknowledged_by == AlertGroup.USER:
log_record = AlertGroupLogRecord(
type=AlertGroupLogRecord.TYPE_ACK_REMINDER_TRIGGERED,
author=alert_group.acknowledged_by_user,
alert_group=alert_group,
)
seconds_unack = Organization.UNACKNOWLEDGE_TIMEOUT_DELAY[
alert_group.channel.organization.unacknowledge_timeout
]
if (
alert_group.channel.organization.unacknowledge_timeout
!= Organization.UNACKNOWLEDGE_TIMEOUT_NEVER
):
unacknowledge_timeout_task.apply_async(
(alert_group.pk, unacknowledge_process_id),
countdown=seconds_unack,
)
else:
if (
alert_group.channel.organization.acknowledge_remind_timeout
!= Organization.ACKNOWLEDGE_REMIND_NEVER
):
seconds_remind = Organization.ACKNOWLEDGE_REMIND_DELAY[
alert_group.channel.organization.acknowledge_remind_timeout
]
acknowledge_reminder_task.apply_async(
(
alert_group.pk,
unacknowledge_process_id,
),
countdown=seconds_remind,
)
if log_record is not None:
log_record.save()
task_logger.debug(
f"call send_alert_group_signal for alert_group {alert_group_pk}, "
f"log record {log_record.pk} with type '{log_record.get_type_display()}'"
if unacknowledge_process_id != alert_group.last_unique_unacknowledge_process_id:
return

# Get timeout values
acknowledge_reminder_timeout = Organization.ACKNOWLEDGE_REMIND_DELAY[
alert_group.channel.organization.acknowledge_remind_timeout
]
unacknowledge_timeout = Organization.UNACKNOWLEDGE_TIMEOUT_DELAY[
alert_group.channel.organization.unacknowledge_timeout
]

# Don't proceed if the alert group is not in a state for acknowledgement reminder
acknowledge_reminder_required = (
alert_group.is_root_alert_group
and alert_group.status == AlertGroup.ACKNOWLEDGED
and alert_group.acknowledged_by == AlertGroup.USER
and acknowledge_reminder_timeout
)
if not acknowledge_reminder_required:
task_logger.info("AlertGroup is not in a state for acknowledgement reminder")
return

# unacknowledge_timeout_task uses acknowledged_by_confirmed to check if acknowledgement reminder has been confirmed
# by the user. Setting to None here to indicate that the user has not confirmed the acknowledgement reminder
alert_group.acknowledged_by_confirmed = None
alert_group.save(update_fields=["acknowledged_by_confirmed"])

if unacknowledge_timeout:  # "unack in N minutes if no response" is enabled
unacknowledge_timeout_task.apply_async(
(alert_group.pk, unacknowledge_process_id), countdown=unacknowledge_timeout
)
else:
acknowledge_reminder_task.apply_async(
(alert_group.pk, unacknowledge_process_id), countdown=acknowledge_reminder_timeout
)
transaction.on_commit(lambda: send_alert_group_signal.apply_async((log_record.pk,)))

task_logger.info(f"Finished a reminder task for acknowledgement timeout with process id {unacknowledge_process_id}")
log_record = alert_group.log_records.create(
type=AlertGroupLogRecord.TYPE_ACK_REMINDER_TRIGGERED, author=alert_group.acknowledged_by_user
)
transaction.on_commit(lambda: send_alert_group_signal.delay(log_record.pk))


@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
)
def unacknowledge_timeout_task(alert_group_pk, unacknowledge_process_id):
@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=MAX_RETRIES)
def unacknowledge_timeout_task(alert_group_pk: int, unacknowledge_process_id: str) -> None:
from apps.alerts.models import AlertGroup, AlertGroupLogRecord
from apps.user_management.models import Organization

log_record = None

task_logger.info(
f"Starting an unacknowledge task " f"for acknowledgement timeout with process id {unacknowledge_process_id}"
)
with transaction.atomic():
try:
alert_group = AlertGroup.objects.filter(pk=alert_group_pk).select_for_update()[0]  # Lock alert_group:
except IndexError:
return f"unacknowledge_timeout_task: Alert group with pk {alert_group_pk} doesn't exist"
alert_group = AlertGroup.objects.select_for_update().get(pk=alert_group_pk)  # Lock alert_group
except AlertGroup.DoesNotExist:
task_logger.warning(f"AlertGroup {alert_group_pk} does not exist")
return

if unacknowledge_process_id == alert_group.last_unique_unacknowledge_process_id:
if not alert_group.resolved and alert_group.acknowledged and alert_group.is_root_alert_group:
if not alert_group.acknowledged_by_confirmed:
log_record = AlertGroupLogRecord(
type=AlertGroupLogRecord.TYPE_AUTO_UN_ACK,
author=alert_group.acknowledged_by_user,
alert_group=alert_group,
)
alert_group.unacknowledge()
alert_group.start_escalation_if_needed()
else:
seconds_remind = Organization.ACKNOWLEDGE_REMIND_DELAY[
alert_group.channel.organization.acknowledge_remind_timeout
]
seconds_unack = Organization.UNACKNOWLEDGE_TIMEOUT_DELAY[
alert_group.channel.organization.unacknowledge_timeout
]
seconds = seconds_remind - seconds_unack
acknowledge_reminder_task.apply_async(
(
alert_group_pk,
unacknowledge_process_id,
),
countdown=seconds,
)
if unacknowledge_process_id != alert_group.last_unique_unacknowledge_process_id:
return

if log_record is not None:
log_record.save()
task_logger.debug(
f"call send_alert_group_signal for alert_group {alert_group_pk}, "
f"log record {log_record.pk} with type '{log_record.get_type_display()}'"
)
transaction.on_commit(lambda: send_alert_group_signal.apply_async((log_record.pk,)))
# Get timeout values
acknowledge_reminder_timeout = Organization.ACKNOWLEDGE_REMIND_DELAY[
alert_group.channel.organization.acknowledge_remind_timeout
]
unacknowledge_timeout = Organization.UNACKNOWLEDGE_TIMEOUT_DELAY[
alert_group.channel.organization.unacknowledge_timeout
]

task_logger.info(
f"Starting an unacknowledge task for acknowledgement timeout with process id {unacknowledge_process_id}"
# Don't proceed if the alert group is not in a state for auto-unacknowledge
unacknowledge_required = (
alert_group.is_root_alert_group
and alert_group.status == AlertGroup.ACKNOWLEDGED
and alert_group.acknowledged_by == AlertGroup.USER
and acknowledge_reminder_timeout
and unacknowledge_timeout
)
if not unacknowledge_required:
task_logger.info("AlertGroup is not in a state for unacknowledge")
return

if alert_group.acknowledged_by_confirmed:  # acknowledgement reminder was confirmed by the user
acknowledge_reminder_task.apply_async(
(alert_group_pk, unacknowledge_process_id), countdown=acknowledge_reminder_timeout - unacknowledge_timeout
)
return

# If acknowledgement reminder wasn't confirmed by the user, unacknowledge the alert group and start escalation again
log_record = alert_group.log_records.create(
type=AlertGroupLogRecord.TYPE_AUTO_UN_ACK, author=alert_group.acknowledged_by_user
)
transaction.on_commit(lambda: send_alert_group_signal.delay(log_record.pk))
alert_group.unacknowledge()
alert_group.start_escalation_if_needed()

engine/apps/alerts/tests/test_acknowledge_reminder.py (new file, 301 lines)
@@ -0,0 +1,301 @@
from unittest.mock import patch

import pytest
from celery import uuid as celery_uuid
from django.utils import timezone

from apps.alerts.constants import ActionSource
from apps.alerts.models import AlertGroup, AlertGroupLogRecord
from apps.alerts.tasks import acknowledge_reminder_task
from apps.alerts.tasks.acknowledge_reminder import unacknowledge_timeout_task
from apps.user_management.models import Organization

ROOT_ALERT_GROUP_ID = 42
TASK_ID = "TASK_ID"


def _parametrize_or(best, worst):
"""
Utility method to parametrize tests with multiple OR conditions. best = best case, when all the conditions in
the OR statement are True. worst = worst case, when all the conditions in the OR statement are False.
"""
assert len(best) == len(worst)
return [(*best[:i], worst[i], *best[i + 1 :]) for i in range(len(best))]
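
For clarity, a short sketch (not part of the change, values are illustrative) of what `_parametrize_or` produces:

```python
# Each generated case flips exactly one condition from its "best" value to its "worst" value,
# so every test run exercises a single failing condition in the OR statement.
_parametrize_or(best=(1, "a", True), worst=(0, "z", False))
# -> [(0, "a", True), (1, "z", True), (1, "a", False)]
```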

@pytest.fixture
def ack_reminder_test_setup(
make_organization,
make_user,
make_alert_receive_channel,
make_alert_group,
make_alert,
):
def _ack_reminder_test_setup(
task_id=TASK_ID,
acknowledged=True,
acknowledged_by=AlertGroup.USER,
resolved=False,
root_alert_group_id=None,
acknowledge_remind_timeout=Organization.ACKNOWLEDGE_REMIND_1H,
unacknowledge_timeout=Organization.UNACKNOWLEDGE_TIMEOUT_5MIN,
acknowledged_by_confirmed=None,
):
organization = make_organization(
acknowledge_remind_timeout=acknowledge_remind_timeout, unacknowledge_timeout=unacknowledge_timeout
)
user = make_user(organization=organization)
alert_receive_channel = make_alert_receive_channel(organization)
make_alert_group(alert_receive_channel=alert_receive_channel, pk=ROOT_ALERT_GROUP_ID)
alert_group = make_alert_group(
alert_receive_channel,
acknowledged=acknowledged,
acknowledged_by=acknowledged_by,
acknowledged_by_user=user,
resolved=resolved,
root_alert_group_id=root_alert_group_id,
)

alert_group.last_unique_unacknowledge_process_id = task_id
alert_group.acknowledged_by_confirmed = acknowledged_by_confirmed
alert_group.save(update_fields=["last_unique_unacknowledge_process_id", "acknowledged_by_confirmed"])

return organization, alert_group, user

return _ack_reminder_test_setup


@pytest.mark.django_db
def test_acknowledge_by_user_invokes_start_ack_reminder(ack_reminder_test_setup):
organization, alert_group, user = ack_reminder_test_setup(acknowledged=False)

with patch.object(alert_group, "start_ack_reminder_if_needed") as mock_start_ack_reminder:
alert_group.acknowledge_by_user(user, ActionSource.SLACK)
mock_start_ack_reminder.assert_called_once_with()


@pytest.mark.django_db
def test_bulk_acknowledge_invokes_start_ack_reminder(ack_reminder_test_setup):
organization, alert_group, user = ack_reminder_test_setup(acknowledged=False)

with patch.object(AlertGroup, "start_ack_reminder_if_needed") as mock_start_ack_reminder:
AlertGroup.bulk_acknowledge(user, AlertGroup.objects.filter(pk=alert_group.pk))
mock_start_ack_reminder.assert_called_once_with()


@pytest.mark.django_db
def test_start_ack_reminder_invokes_acknowledge_reminder_task(ack_reminder_test_setup):
organization, alert_group, user = ack_reminder_test_setup()

# make sure celery_uuid returns a string to be passed to the task
assert type(celery_uuid()) == str

with patch.object(acknowledge_reminder_task, "apply_async") as mock_acknowledge_reminder_task:
with patch("apps.alerts.models.alert_group.celery_uuid", return_value=TASK_ID):
alert_group.start_ack_reminder_if_needed()
mock_acknowledge_reminder_task.assert_called_once_with(
(alert_group.pk, TASK_ID),
countdown=Organization.ACKNOWLEDGE_REMIND_DELAY[organization.acknowledge_remind_timeout],
)


@pytest.mark.parametrize(
"root_alert_group_id,acknowledge_remind_timeout",
_parametrize_or(
best=(None, Organization.ACKNOWLEDGE_REMIND_1H),
worst=(ROOT_ALERT_GROUP_ID, Organization.ACKNOWLEDGE_REMIND_NEVER),
),
)
@pytest.mark.django_db
def test_ack_reminder_skip(ack_reminder_test_setup, root_alert_group_id, acknowledge_remind_timeout):
organization, alert_group, user = ack_reminder_test_setup(
acknowledge_remind_timeout=acknowledge_remind_timeout, root_alert_group_id=root_alert_group_id
)

with patch.object(acknowledge_reminder_task, "apply_async") as mock_acknowledge_reminder_task:
alert_group.start_ack_reminder_if_needed()
mock_acknowledge_reminder_task.assert_not_called()


@pytest.mark.parametrize(
"task_id,acknowledged,acknowledged_by,resolved,root_alert_group_id,acknowledge_remind_timeout",
_parametrize_or(
best=(TASK_ID, True, AlertGroup.USER, False, None, Organization.ACKNOWLEDGE_REMIND_1H),
worst=(None, False, AlertGroup.SOURCE, True, ROOT_ALERT_GROUP_ID, Organization.ACKNOWLEDGE_REMIND_NEVER),
),
)
@patch.object(unacknowledge_timeout_task, "apply_async")
@patch.object(acknowledge_reminder_task, "apply_async")
@pytest.mark.django_db
def test_acknowledge_reminder_task_skip(
mock_acknowledge_reminder_task,
mock_unacknowledge_timeout_task,
ack_reminder_test_setup,
task_id,
acknowledged,
acknowledged_by,
resolved,
root_alert_group_id,
acknowledge_remind_timeout,
):
organization, alert_group, user = ack_reminder_test_setup(
task_id=task_id,
acknowledged=acknowledged,
acknowledged_by=acknowledged_by,
resolved=resolved,
root_alert_group_id=root_alert_group_id,
acknowledge_remind_timeout=acknowledge_remind_timeout,
)
acknowledge_reminder_task(alert_group.pk, TASK_ID)

mock_unacknowledge_timeout_task.assert_not_called()
mock_acknowledge_reminder_task.assert_not_called()

assert not alert_group.log_records.exists()


@patch.object(unacknowledge_timeout_task, "apply_async")
@patch.object(acknowledge_reminder_task, "apply_async")
@pytest.mark.django_db
def test_acknowledge_reminder_task_reschedules_itself(
mock_acknowledge_reminder_task, mock_unacknowledge_timeout_task, ack_reminder_test_setup
):
organization, alert_group, user = ack_reminder_test_setup(
unacknowledge_timeout=Organization.UNACKNOWLEDGE_TIMEOUT_NEVER
)
acknowledge_reminder_task(alert_group.pk, TASK_ID)

mock_unacknowledge_timeout_task.assert_not_called()
mock_acknowledge_reminder_task.assert_called_once_with(
(alert_group.pk, TASK_ID),
countdown=Organization.ACKNOWLEDGE_REMIND_DELAY[organization.acknowledge_remind_timeout],
)

log_record = alert_group.log_records.get()
assert log_record.type == AlertGroupLogRecord.TYPE_ACK_REMINDER_TRIGGERED
assert log_record.author == alert_group.acknowledged_by_user


@patch.object(unacknowledge_timeout_task, "apply_async")
@patch.object(acknowledge_reminder_task, "apply_async")
@pytest.mark.django_db
def test_acknowledge_reminder_task_invokes_unacknowledge_timeout_task(
mock_acknowledge_reminder_task, mock_unacknowledge_timeout_task, ack_reminder_test_setup
):
organization, alert_group, user = ack_reminder_test_setup(
unacknowledge_timeout=Organization.UNACKNOWLEDGE_TIMEOUT_5MIN
)
acknowledge_reminder_task(alert_group.pk, TASK_ID)

mock_acknowledge_reminder_task.assert_not_called()
mock_unacknowledge_timeout_task.assert_called_with(
(alert_group.pk, TASK_ID),
countdown=Organization.UNACKNOWLEDGE_TIMEOUT_DELAY[organization.unacknowledge_timeout],
)

alert_group.refresh_from_db()
assert alert_group.acknowledged_by_confirmed is None

log_record = alert_group.log_records.get()
assert log_record.type == AlertGroupLogRecord.TYPE_ACK_REMINDER_TRIGGERED
assert log_record.author == alert_group.acknowledged_by_user


@pytest.mark.parametrize(
"task_id,acknowledged,acknowledged_by,resolved,root_alert_group_id,acknowledge_remind_timeout,unacknowledge_timeout",
_parametrize_or(
best=(
TASK_ID,
True,
AlertGroup.USER,
False,
None,
Organization.ACKNOWLEDGE_REMIND_1H,
Organization.UNACKNOWLEDGE_TIMEOUT_5MIN,
),
worst=(
None,
False,
AlertGroup.SOURCE,
True,
ROOT_ALERT_GROUP_ID,
Organization.ACKNOWLEDGE_REMIND_NEVER,
Organization.UNACKNOWLEDGE_TIMEOUT_NEVER,
),
),
)
@patch.object(unacknowledge_timeout_task, "apply_async")
@patch.object(acknowledge_reminder_task, "apply_async")
@pytest.mark.django_db
def test_unacknowledge_timeout_task_skip(
mock_acknowledge_reminder_task,
mock_unacknowledge_timeout_task,
ack_reminder_test_setup,
task_id,
acknowledged,
acknowledged_by,
resolved,
root_alert_group_id,
acknowledge_remind_timeout,
unacknowledge_timeout,
):
organization, alert_group, user = ack_reminder_test_setup(
task_id=task_id,
acknowledged=acknowledged,
acknowledged_by=acknowledged_by,
resolved=resolved,
root_alert_group_id=root_alert_group_id,
acknowledge_remind_timeout=acknowledge_remind_timeout,
unacknowledge_timeout=unacknowledge_timeout,
)
unacknowledge_timeout_task(alert_group.pk, TASK_ID)

mock_unacknowledge_timeout_task.assert_not_called()
mock_acknowledge_reminder_task.assert_not_called()

assert not alert_group.log_records.exists()


@patch.object(AlertGroup, "start_escalation_if_needed")
@patch.object(AlertGroup, "unacknowledge")
@patch.object(unacknowledge_timeout_task, "apply_async")
@patch.object(acknowledge_reminder_task, "apply_async")
@pytest.mark.django_db
def test_unacknowledge_timeout_task_unacknowledge(
mock_acknowledge_reminder_task,
mock_unacknowledge_timeout_task,
mock_unacknowledge,
mock_start_escalation_if_needed,
ack_reminder_test_setup,
):
organization, alert_group, user = ack_reminder_test_setup()
unacknowledge_timeout_task(alert_group.pk, TASK_ID)

mock_unacknowledge_timeout_task.assert_not_called()
mock_acknowledge_reminder_task.assert_not_called()

log_record = alert_group.log_records.get()
assert log_record.type == AlertGroupLogRecord.TYPE_AUTO_UN_ACK
assert log_record.author == alert_group.acknowledged_by_user

mock_unacknowledge.assert_called_once_with()
mock_start_escalation_if_needed.assert_called_once_with()


@patch.object(unacknowledge_timeout_task, "apply_async")
@patch.object(acknowledge_reminder_task, "apply_async")
@pytest.mark.django_db
def test_unacknowledge_timeout_task_no_unacknowledge(
mock_acknowledge_reminder_task, mock_unacknowledge_timeout_task, ack_reminder_test_setup
):
organization, alert_group, user = ack_reminder_test_setup(acknowledged_by_confirmed=timezone.now())
unacknowledge_timeout_task(alert_group.pk, TASK_ID)

mock_unacknowledge_timeout_task.assert_not_called()
mock_acknowledge_reminder_task.assert_called_once_with(
(alert_group.pk, TASK_ID),
countdown=Organization.ACKNOWLEDGE_REMIND_DELAY[organization.acknowledge_remind_timeout]
- Organization.UNACKNOWLEDGE_TIMEOUT_DELAY[organization.unacknowledge_timeout],
)

assert not alert_group.log_records.exists()
@@ -1,7 +1,9 @@
import datetime
import logging

from django.core.cache import cache
from django.utils import timezone
from drf_spectacular.utils import extend_schema_field, inline_serializer
from rest_framework import serializers

from apps.alerts.incident_appearance.renderers.classic_markdown_renderer import AlertGroupClassicMarkdownRenderer

@@ -13,7 +15,7 @@ from common.api_helpers.mixins import EagerLoadingMixin
from .alert import AlertSerializer
from .alert_receive_channel import FastAlertReceiveChannelSerializer
from .alerts_field_cache_buster_mixin import AlertsFieldCacheBusterMixin
from .user import FastUserSerializer
from .user import FastUserSerializer, UserShortSerializer

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

@@ -62,7 +64,19 @@ class ShortAlertGroupSerializer(AlertGroupFieldsCacheSerializerMixin, serializer
class Meta:
model = AlertGroup
fields = ["pk", "render_for_web", "alert_receive_channel", "inside_organization_number"]
read_only_fields = ["pk", "render_for_web", "alert_receive_channel", "inside_organization_number"]

@extend_schema_field(
inline_serializer(
name="render_for_web",
fields={
"title": serializers.CharField(),
"message": serializers.CharField(),
"image_url": serializers.CharField(),
"source_link": serializers.CharField(),
},
)
)
def get_render_for_web(self, obj):
last_alert = obj.alerts.last()
if last_alert is None:

@@ -138,6 +152,17 @@ class AlertGroupListSerializer(EagerLoadingMixin, AlertGroupFieldsCacheSerialize
"is_restricted",
]

@extend_schema_field(
inline_serializer(
name="render_for_web",
fields={
"title": serializers.CharField(),
"message": serializers.CharField(),
"image_url": serializers.CharField(),
"source_link": serializers.CharField(),
},
)
)
def get_render_for_web(self, obj):
if not obj.last_alert:
return {}

@@ -149,6 +174,7 @@ class AlertGroupListSerializer(EagerLoadingMixin, AlertGroupFieldsCacheSerialize
)

def get_render_for_classic_markdown(self, obj):
"""Deprecated. TODO: remove"""
if not obj.last_alert:
return {}
return AlertGroupFieldsCacheSerializerMixin.get_or_set_web_template_field(

@@ -158,6 +184,7 @@ class AlertGroupListSerializer(EagerLoadingMixin, AlertGroupFieldsCacheSerialize
AlertGroupClassicMarkdownRenderer,
)

@extend_schema_field(UserShortSerializer(many=True))
def get_related_users(self, obj):
users_ids = set()
users = []

@@ -166,21 +193,21 @@ class AlertGroupListSerializer(EagerLoadingMixin, AlertGroupFieldsCacheSerialize
# when def acknowledge/resolve are called in view.
if obj.resolved_by_user:
users_ids.add(obj.resolved_by_user.public_primary_key)
users.append(obj.resolved_by_user.short())
users.append(obj.resolved_by_user)

if obj.acknowledged_by_user and obj.acknowledged_by_user.public_primary_key not in users_ids:
users_ids.add(obj.acknowledged_by_user.public_primary_key)
users.append(obj.acknowledged_by_user.short())
users.append(obj.acknowledged_by_user)

if obj.silenced_by_user and obj.silenced_by_user.public_primary_key not in users_ids:
users_ids.add(obj.silenced_by_user.public_primary_key)
users.append(obj.silenced_by_user.short())
users.append(obj.silenced_by_user)

for log_record in obj.log_records.all():
if log_record.author is not None and log_record.author.public_primary_key not in users_ids:
users.append(log_record.author.short())
users.append(log_record.author)
users_ids.add(log_record.author.public_primary_key)
return users
return UserShortSerializer(users, many=True).data


class AlertGroupSerializer(AlertGroupListSerializer):

@@ -198,7 +225,7 @@ class AlertGroupSerializer(AlertGroupListSerializer):
"paged_users",
]

def get_last_alert_at(self, obj):
def get_last_alert_at(self, obj) -> datetime.datetime:
last_alert = obj.alerts.last()

if not last_alert:

@@ -206,6 +233,7 @@ class AlertGroupSerializer(AlertGroupListSerializer):

return last_alert.created_at

@extend_schema_field(AlertSerializer(many=True))
def get_limited_alerts(self, obj):
"""
Overriding default alerts because there are alert_groups with thousands of them.

@@ -214,5 +242,8 @@ class AlertGroupSerializer(AlertGroupListSerializer):
alerts = obj.alerts.order_by("-pk")[:100]
return AlertSerializer(alerts, many=True).data

@extend_schema_field(UserShortSerializer(many=True))
def get_paged_users(self, obj):
return [u.short() for u in obj.get_paged_users()]
paged_users = obj.get_paged_users()
serializer = UserShortSerializer(paged_users, many=True)
return serializer.data
@@ -1,7 +1,7 @@
from django.utils import timezone
from rest_framework import serializers

from apps.schedules.models import CustomOnCallShift, OnCallSchedule
from apps.schedules.models import CustomOnCallShift, OnCallSchedule, OnCallScheduleWeb
from apps.user_management.models import User
from common.api_helpers.custom_fields import (
OrganizationFilteredPrimaryKeyRelatedField,

@@ -87,6 +87,11 @@ class OnCallShiftSerializer(EagerLoadingMixin, serializers.ModelSerializer):
raise serializers.ValidationError(["Invalid day value."])
return by_day

def _validate_type(self, schedule, event_type):
if schedule and not isinstance(schedule, OnCallScheduleWeb) and event_type != CustomOnCallShift.TYPE_OVERRIDE:
# if this is not related to a web schedule, only allow override web events
raise serializers.ValidationError({"type": ["Invalid event type"]})

def validate_week_start(self, week_start):
if week_start is None:
week_start = CustomOnCallShift.MONDAY

@@ -158,6 +163,7 @@ class OnCallShiftSerializer(EagerLoadingMixin, serializers.ModelSerializer):
"priority_level",
"rotation_start",
]
self._validate_type(validated_data.get("schedule"), event_type)
if event_type == CustomOnCallShift.TYPE_OVERRIDE:
for field in fields_to_update_for_overrides:
value = None
@@ -22,7 +22,7 @@ class OrganizationSerializer(EagerLoadingMixin, serializers.ModelSerializer):
name = serializers.CharField(required=False, allow_null=True, allow_blank=True, source="org_title")
slack_channel = serializers.SerializerMethodField()

rbac_enabled = serializers.BooleanField(source="is_rbac_permissions_enabled")
rbac_enabled = serializers.BooleanField(read_only=True, source="is_rbac_permissions_enabled")

SELECT_RELATED = ["slack_team_identity"]
@@ -239,3 +239,25 @@ class FilterUserSerializer(EagerLoadingMixin, serializers.ModelSerializer):
"pk",
"username",
]


class UserShortSerializer(serializers.ModelSerializer):
username = serializers.CharField()
pk = serializers.CharField(source="public_primary_key")
avatar = serializers.CharField(source="avatar_url")
avatar_full = serializers.CharField(source="avatar_full_url")

class Meta:
model = User
fields = [
"username",
"pk",
"avatar",
"avatar_full",
]
read_only_fields = [
"username",
"pk",
"avatar",
"avatar_full",
]
@@ -8,7 +8,7 @@ from rest_framework.response import Response
from rest_framework.test import APIClient

from apps.api.permissions import LegacyAccessControlRole
from apps.schedules.models import CustomOnCallShift, OnCallSchedule, OnCallScheduleWeb
from apps.schedules.models import CustomOnCallShift, OnCallSchedule, OnCallScheduleCalendar, OnCallScheduleWeb


@pytest.fixture()

@@ -59,6 +59,46 @@ def test_create_on_call_shift_rotation(on_call_shift_internal_api_setup, make_us
assert mock_refresh_schedule.called


@pytest.mark.django_db
def test_create_on_call_shift_rotation_invalid_type(
make_organization_and_user_with_plugin_token,
make_schedule,
make_user_auth_headers,
):
organization, user, token = make_organization_and_user_with_plugin_token()
schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)

client = APIClient()
url = reverse("api-internal:oncall_shifts-list")
start_date = timezone.now().replace(microsecond=0, tzinfo=None)

data = {
"name": "Test Shift",
"type": CustomOnCallShift.TYPE_ROLLING_USERS_EVENT,
"schedule": schedule.public_primary_key,
"priority_level": 1,
"shift_start": start_date.strftime("%Y-%m-%dT%H:%M:%SZ"),
"shift_end": (start_date + timezone.timedelta(hours=2)).strftime("%Y-%m-%dT%H:%M:%SZ"),
"rotation_start": start_date.strftime("%Y-%m-%dT%H:%M:%SZ"),
"until": None,
"frequency": 1,
"interval": 1,
"by_day": [
CustomOnCallShift.ICAL_WEEKDAY_MAP[CustomOnCallShift.MONDAY],
CustomOnCallShift.ICAL_WEEKDAY_MAP[CustomOnCallShift.FRIDAY],
],
"week_start": CustomOnCallShift.ICAL_WEEKDAY_MAP[CustomOnCallShift.MONDAY],
"rolling_users": [[user.public_primary_key]],
}

with patch("apps.schedules.models.CustomOnCallShift.refresh_schedule") as mock_refresh_schedule:
response = client.post(url, data, format="json", **make_user_auth_headers(user, token))

assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data["type"][0] == "Invalid event type"
assert not mock_refresh_schedule.called


@pytest.mark.django_db
def test_create_on_call_shift_rotation_missing_users(on_call_shift_internal_api_setup, make_user_auth_headers):
token, user1, user2, _, schedule = on_call_shift_internal_api_setup

@@ -1557,6 +1597,42 @@ def test_on_call_shift_preview(
assert returned_events == expected_events


@pytest.mark.django_db
def test_on_call_shift_preview_invalid_type(
make_organization_and_user_with_plugin_token,
make_user_auth_headers,
make_schedule,
):
organization, user, token = make_organization_and_user_with_plugin_token()
client = APIClient()

schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)

now = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
start_date = now - timezone.timedelta(days=7)
request_date = start_date

url = "{}?date={}&days={}".format(
reverse("api-internal:oncall_shifts-preview"), request_date.strftime("%Y-%m-%d"), 1
)
shift_start = (start_date + timezone.timedelta(hours=12)).strftime("%Y-%m-%dT%H:%M:%SZ")
shift_end = (start_date + timezone.timedelta(hours=13)).strftime("%Y-%m-%dT%H:%M:%SZ")
shift_data = {
"schedule": schedule.public_primary_key,
"type": CustomOnCallShift.TYPE_ROLLING_USERS_EVENT,
"rotation_start": shift_start,
"shift_start": shift_start,
"shift_end": shift_end,
"rolling_users": [[user.public_primary_key]],
"priority_level": 2,
"frequency": CustomOnCallShift.FREQUENCY_DAILY,
"interval": 1,
}
response = client.post(url, shift_data, format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data["type"][0] == "Invalid event type"


@pytest.mark.django_db
def test_on_call_shift_preview_without_users(
make_organization_and_user_with_plugin_token,
@@ -26,6 +26,22 @@ def test_get_organization_rbac_enabled(
assert response.json()["rbac_enabled"] == rbac_enabled


@pytest.mark.django_db
def test_update_organization_settings(make_organization_and_user_with_plugin_token, make_user_auth_headers):
organization, user, token = make_organization_and_user_with_plugin_token()

client = APIClient()
url = reverse("api-internal:api-organization")
data = {"is_resolution_note_required": True}

assert organization.is_resolution_note_required is False

response = client.put(url, format="json", data=data, **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
organization.refresh_from_db()
assert organization.is_resolution_note_required is True


@pytest.mark.django_db
@pytest.mark.parametrize(
"role,expected_status",
@@ -5,7 +5,8 @@ from django.db.models import Count, Max, Q
from django.utils import timezone
from django_filters import rest_framework as filters
from django_filters.widgets import RangeWidget
from rest_framework import mixins, status, viewsets
from drf_spectacular.utils import extend_schema, inline_serializer
from rest_framework import mixins, serializers, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound
from rest_framework.filters import SearchFilter

@@ -395,8 +396,10 @@ class AlertGroupView(

return alert_groups

@extend_schema(responses=inline_serializer(name="AlertGroupStats", fields={"count": serializers.IntegerField()}))
@action(detail=False)
def stats(self, *args, **kwargs):
"""Return number of alert groups capped at 100001"""
MAX_COUNT = 100001
alert_groups = self.filter_queryset(self.get_queryset())[:MAX_COUNT]
count = alert_groups.count()

@@ -492,6 +495,9 @@ class AlertGroupView(

@action(methods=["post"], detail=True)
def attach(self, request, pk=None):
"""
Attach alert group to another alert group
"""
alert_group = self.get_object()
if alert_group.is_maintenance_incident:
raise BadRequest(detail="Can't attach maintenance alert group")

@@ -537,6 +543,13 @@ class AlertGroupView(
alert_group.silence_by_user(request.user, silence_delay=delay, action_source=ActionSource.WEB)
return Response(AlertGroupSerializer(alert_group, context={"request": request}).data)

@extend_schema(
responses=inline_serializer(
name="silence_options",
fields={"value": serializers.CharField(), "display_name": serializers.CharField()},
many=True,
)
)
@action(methods=["get"], detail=False)
def silence_options(self, request):
data = [
@@ -1,4 +1,6 @@
from django.conf import settings
from drf_spectacular.utils import OpenApiExample, extend_schema
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework.views import APIView

@@ -21,8 +23,19 @@ class FeaturesAPIView(APIView):

authentication_classes = (PluginAuthentication,)

@extend_schema(
request=None,
responses=serializers.ListField(child=serializers.CharField()),
examples=[
OpenApiExample(
name="Example response",
value=["slack", "telegram", "grafana_cloud_connection", "live_settings", "grafana_cloud_notifications"],
)
],
)
def get(self, request):
return Response(self._get_enabled_features(request))
data = self._get_enabled_features(request)
return Response(data)

def _get_enabled_features(self, request):
enabled_features = []
@ -14,6 +14,7 @@ from apps.mobile_app.auth import MobileAppAuthTokenAuthentication
|
|||
from apps.schedules import exceptions
|
||||
from apps.schedules.models import ShiftSwapRequest
|
||||
from apps.schedules.tasks.shift_swaps import create_shift_swap_request_message, update_shift_swap_request_message
|
||||
from apps.user_management.models import User
|
||||
from common.api_helpers.exceptions import BadRequest
|
||||
from common.api_helpers.mixins import PublicPrimaryKeyMixin
|
||||
from common.api_helpers.paginators import FiftyPageSizePaginator
|
||||
|
|
@ -22,7 +23,66 @@ from common.insight_log import EntityEvent, write_resource_insight_log
|
|||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ShiftSwapViewSet(PublicPrimaryKeyMixin[ShiftSwapRequest], ModelViewSet):
|
||||
class BaseShiftSwapViewSet(ModelViewSet):
|
||||
model = ShiftSwapRequest
|
||||
serializer_class = ShiftSwapRequestSerializer
|
||||
pagination_class = FiftyPageSizePaginator
|
||||
|
||||
def _do_create(self, beneficiary: User, serializer: BaseSerializer[ShiftSwapRequest]) -> None:
|
||||
shift_swap_request = serializer.save(beneficiary=beneficiary)
|
||||
|
||||
write_resource_insight_log(instance=shift_swap_request, author=self.request.user, event=EntityEvent.CREATED)
|
||||
|
||||
create_shift_swap_request_message.apply_async((shift_swap_request.pk,))
|
||||
|
||||
def _do_take(self, benefactor: User) -> dict:
|
||||
shift_swap = self.get_object()
|
||||
|
||||
try:
|
||||
shift_swap.take(benefactor)
|
||||
except exceptions.ShiftSwapRequestNotOpenForTaking:
|
||||
raise BadRequest(detail="The shift swap request is not in a state which allows it to be taken")
|
||||
except exceptions.BeneficiaryCannotTakeOwnShiftSwapRequest:
|
||||
raise BadRequest(detail="A shift swap request cannot be created and taken by the same user")
|
||||
|
||||
return ShiftSwapRequestSerializer(shift_swap).data
|
||||
|
||||
def get_serializer_class(self):
|
||||
return ShiftSwapRequestListSerializer if self.action == "list" else super().get_serializer_class()
|
||||
|
||||
def get_queryset(self):
|
||||
queryset = ShiftSwapRequest.objects.filter(schedule__organization=self.request.auth.organization)
|
||||
return self.serializer_class.setup_eager_loading(queryset)
|
||||
|
||||
def perform_destroy(self, instance: ShiftSwapRequest) -> None:
|
||||
# TODO: should we allow deleting a taken request?
|
||||
|
||||
super().perform_destroy(instance)
|
||||
write_resource_insight_log(instance=instance, author=self.request.user, event=EntityEvent.DELETED)
|
||||
|
||||
update_shift_swap_request_message.apply_async((instance.pk,))
|
||||
|
||||
def perform_create(self, serializer: BaseSerializer[ShiftSwapRequest]) -> None:
|
||||
# default to create swap request with logged in user as beneficiary
|
||||
self._do_create(self.request.user, serializer=serializer)
|
||||
|
||||
def perform_update(self, serializer: BaseSerializer[ShiftSwapRequest]) -> None:
|
||||
prev_state = serializer.instance.insight_logs_serialized
|
||||
serializer.save()
|
||||
shift_swap_request = serializer.instance
|
||||
|
||||
write_resource_insight_log(
|
||||
instance=shift_swap_request,
|
||||
author=self.request.user,
|
||||
event=EntityEvent.UPDATED,
|
||||
prev_state=prev_state,
|
||||
new_state=shift_swap_request.insight_logs_serialized,
|
||||
)
|
||||
|
||||
update_shift_swap_request_message.apply_async((shift_swap_request.pk,))
|
||||
|
||||
|
||||
class ShiftSwapViewSet(PublicPrimaryKeyMixin[ShiftSwapRequest], BaseShiftSwapViewSet):
|
||||
authentication_classes = (MobileAppAuthTokenAuthentication, PluginAuthentication)
|
||||
permission_classes = (IsAuthenticated, RBACPermission)
|
||||
|
||||
|
|
@@ -49,57 +109,7 @@ class ShiftSwapViewSet(PublicPrimaryKeyMixin[ShiftSwapRequest], ModelViewSet):
|
|||
],
|
||||
}
|
||||
|
||||
model = ShiftSwapRequest
|
||||
serializer_class = ShiftSwapRequestSerializer
|
||||
pagination_class = FiftyPageSizePaginator
|
||||
|
||||
def get_serializer_class(self):
|
||||
return ShiftSwapRequestListSerializer if self.action == "list" else super().get_serializer_class()
|
||||
|
||||
def get_queryset(self):
|
||||
queryset = ShiftSwapRequest.objects.filter(schedule__organization=self.request.auth.organization)
|
||||
return self.serializer_class.setup_eager_loading(queryset)
|
||||
|
||||
def perform_destroy(self, instance: ShiftSwapRequest) -> None:
|
||||
# TODO: should we allow deleting a taken request?
|
||||
|
||||
super().perform_destroy(instance)
|
||||
write_resource_insight_log(instance=instance, author=self.request.user, event=EntityEvent.DELETED)
|
||||
|
||||
update_shift_swap_request_message.apply_async((instance.pk,))
|
||||
|
||||
def perform_create(self, serializer: BaseSerializer[ShiftSwapRequest]) -> None:
|
||||
beneficiary = self.request.user
|
||||
shift_swap_request = serializer.save(beneficiary=beneficiary)
|
||||
|
||||
write_resource_insight_log(instance=shift_swap_request, author=beneficiary, event=EntityEvent.CREATED)
|
||||
|
||||
create_shift_swap_request_message.apply_async((shift_swap_request.pk,))
|
||||
|
||||
def perform_update(self, serializer: BaseSerializer[ShiftSwapRequest]) -> None:
|
||||
prev_state = serializer.instance.insight_logs_serialized
|
||||
serializer.save()
|
||||
shift_swap_request = serializer.instance
|
||||
|
||||
write_resource_insight_log(
|
||||
instance=shift_swap_request,
|
||||
author=self.request.user,
|
||||
event=EntityEvent.UPDATED,
|
||||
prev_state=prev_state,
|
||||
new_state=shift_swap_request.insight_logs_serialized,
|
||||
)
|
||||
|
||||
update_shift_swap_request_message.apply_async((shift_swap_request.pk,))
|
||||
|
||||
@action(methods=["post"], detail=True)
|
||||
def take(self, request: AuthenticatedRequest, pk: str) -> Response:
|
||||
shift_swap = self.get_object()
|
||||
|
||||
try:
|
||||
shift_swap.take(request.user)
|
||||
except exceptions.ShiftSwapRequestNotOpenForTaking:
|
||||
raise BadRequest(detail="The shift swap request is not in a state which allows it to be taken")
|
||||
except exceptions.BeneficiaryCannotTakeOwnShiftSwapRequest:
|
||||
raise BadRequest(detail="A shift swap request cannot be created and taken by the same user")
|
||||
|
||||
return Response(ShiftSwapRequestSerializer(shift_swap).data, status=status.HTTP_200_OK)
|
||||
serialized_shift_swap = self._do_take(benefactor=request.user)
|
||||
return Response(serialized_shift_swap, status=status.HTTP_200_OK)
|
||||
|
|
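The hunks above pull the shared create/take flow into `BaseShiftSwapViewSet`, so a concrete viewset only has to decide which user acts as beneficiary or benefactor. A minimal sketch of that hook pattern, not part of the diff itself (the subclass name and the beneficiary/benefactor sources are hypothetical; only `BaseShiftSwapViewSet`, `_do_create` and `_do_take` come from the code above):

```python
# Hypothetical subclass: persistence, insight logging and the Slack message
# update stay inside the shared _do_create/_do_take hooks of BaseShiftSwapViewSet.
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.serializers import BaseSerializer

from apps.api.views.shift_swap import BaseShiftSwapViewSet
from apps.schedules.models import ShiftSwapRequest


class ExampleShiftSwapViewSet(BaseShiftSwapViewSet):
    def perform_create(self, serializer: BaseSerializer[ShiftSwapRequest]) -> None:
        # decide who the beneficiary is; here, the authenticated user
        self._do_create(beneficiary=self.request.user, serializer=serializer)

    @action(methods=["post"], detail=True)
    def take(self, request, pk: str) -> Response:
        # decide who the benefactor is; here, the authenticated user
        return Response(self._do_take(benefactor=request.user))
```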
|
|||
|
|
@@ -483,27 +483,17 @@ def conditionally_send_going_oncall_push_notifications_for_all_schedules() -> No
|
|||
|
||||
# TODO: break down tasks.py into multiple files
|
||||
|
||||
# Don't send notifications for shift swap requests that start more than 4 weeks in the future
|
||||
SSR_EARLIEST_NOTIFICATION_OFFSET = datetime.timedelta(weeks=4)
|
||||
|
||||
# Once it's time to send out notifications, send them over the course of a week.
|
||||
# This is because users can be in multiple timezones / have different working hours configured,
|
||||
# so we can't just send all notifications at once, but need to wait for the users to be in their working hours.
|
||||
# Once a notification is sent to a user, they won't be notified again for the same shift swap request for a week.
|
||||
# After a week, the shift swap request won't be in the notification window anymore (see _get_shift_swap_requests_to_notify).
|
||||
SSR_NOTIFICATION_WINDOW = datetime.timedelta(weeks=1)
|
||||
|
||||
|
||||
@shared_dedicated_queue_retry_task()
|
||||
def notify_shift_swap_requests() -> None:
|
||||
"""
|
||||
A periodic task that notifies users about shift swap requests.
|
||||
"""
|
||||
for shift_swap_request in _get_shift_swap_requests_to_notify(timezone.now()):
|
||||
notify_shift_swap_request.delay(shift_swap_request.pk)
|
||||
for shift_swap_request, timeout in _get_shift_swap_requests_to_notify(timezone.now()):
|
||||
notify_shift_swap_request.delay(shift_swap_request.pk, timeout)
|
||||
|
||||
|
||||
def _get_shift_swap_requests_to_notify(now: datetime.datetime) -> list[ShiftSwapRequest]:
|
||||
def _get_shift_swap_requests_to_notify(now: datetime.datetime) -> list[tuple[ShiftSwapRequest, int]]:
|
||||
"""
|
||||
Returns shifts swap requests that are open and are in the notification window.
|
||||
This method can return the same shift swap request multiple times while it's in the notification window,
|
||||
|
|
@@ -511,20 +501,30 @@ def _get_shift_swap_requests_to_notify(now: datetime.datetime) -> list[ShiftSwap
|
|||
"""
|
||||
|
||||
shift_swap_requests_in_notification_window = []
|
||||
for shift_swap_request in ShiftSwapRequest.objects.filter(benefactor__isnull=True, swap_start__gt=now):
|
||||
notification_window_start = max(
|
||||
shift_swap_request.created_at, shift_swap_request.swap_start - SSR_EARLIEST_NOTIFICATION_OFFSET
|
||||
)
|
||||
notification_window_end = min(notification_window_start + SSR_NOTIFICATION_WINDOW, shift_swap_request.swap_end)
|
||||
for shift_swap_request in ShiftSwapRequest.objects.get_open_requests(now):
|
||||
for idx, offset in enumerate(ShiftSwapRequest.FOLLOWUP_OFFSETS):
|
||||
next_offset = (
|
||||
ShiftSwapRequest.FOLLOWUP_OFFSETS[idx + 1]
|
||||
if idx + 1 < len(ShiftSwapRequest.FOLLOWUP_OFFSETS)
|
||||
else datetime.timedelta(0)
|
||||
)
|
||||
window = offset - next_offset - timezone.timedelta(microseconds=1) # check SSRs up to the next offset
|
||||
|
||||
if notification_window_start <= now <= notification_window_end:
|
||||
shift_swap_requests_in_notification_window.append(shift_swap_request)
|
||||
notification_window_start = shift_swap_request.swap_start - offset
|
||||
notification_window_end = notification_window_start + window
|
||||
|
||||
if notification_window_start <= now <= notification_window_end:
|
||||
next_notification_dt = shift_swap_request.swap_start - next_offset
|
||||
timeout = math.ceil((next_notification_dt - now).total_seconds()) # don't send notifications twice
|
||||
|
||||
shift_swap_requests_in_notification_window.append((shift_swap_request, timeout))
|
||||
break
|
||||
|
||||
return shift_swap_requests_in_notification_window
|
||||
|
||||
|
||||
@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=MAX_RETRIES)
|
||||
def notify_shift_swap_request(shift_swap_request_pk: int) -> None:
|
||||
def notify_shift_swap_request(shift_swap_request_pk: int, timeout: int) -> None:
|
||||
"""
|
||||
Notify relevant users for an individual shift swap request.
|
||||
"""
|
||||
|
|
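To make the window arithmetic above concrete: for each follow-up offset (largest first), the window runs from `swap_start - offset` up to just before `swap_start - next_offset`, and the returned timeout expires exactly at the next follow-up time, which is why `_mark_shift_swap_request_notified_for_user` now receives it explicitly instead of using a fixed one-week TTL. A small self-contained sketch with assumed example values (not part of the diff):

```python
# Worked example of the offset/window/timeout logic used by
# _get_shift_swap_requests_to_notify; swap_start and "now" are made up.
import datetime
import math

FOLLOWUP_OFFSETS = [
    datetime.timedelta(weeks=4), datetime.timedelta(weeks=3), datetime.timedelta(weeks=2),
    datetime.timedelta(weeks=1), datetime.timedelta(days=3), datetime.timedelta(days=2),
    datetime.timedelta(days=1), datetime.timedelta(hours=12),
]

swap_start = datetime.datetime(2023, 9, 1, tzinfo=datetime.timezone.utc)
now = swap_start - datetime.timedelta(days=10)  # assumed "current" time

for idx, offset in enumerate(FOLLOWUP_OFFSETS):
    next_offset = FOLLOWUP_OFFSETS[idx + 1] if idx + 1 < len(FOLLOWUP_OFFSETS) else datetime.timedelta(0)
    window_start = swap_start - offset
    window_end = window_start + (offset - next_offset - datetime.timedelta(microseconds=1))
    if window_start <= now <= window_end:
        # notify now; suppress repeats until the next follow-up time
        timeout = math.ceil((swap_start - next_offset - now).total_seconds())
        print(offset, timeout)  # -> 14 days, 0:00:00 259200  (3 days; matches the days=10 test case below)
        break
```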
@@ -538,7 +538,7 @@ def notify_shift_swap_request(shift_swap_request_pk: int) -> None:
|
|||
for user in shift_swap_request.possible_benefactors:
|
||||
if _should_notify_user_about_shift_swap_request(shift_swap_request, user, now):
|
||||
notify_user_about_shift_swap_request.delay(shift_swap_request.pk, user.pk)
|
||||
_mark_shift_swap_request_notified_for_user(shift_swap_request, user)
|
||||
_mark_shift_swap_request_notified_for_user(shift_swap_request, user, timeout)
|
||||
|
||||
|
||||
@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=MAX_RETRIES)
|
||||
|
|
@@ -595,16 +595,16 @@ def _should_notify_user_about_shift_swap_request(
|
|||
except MobileAppUserSettings.DoesNotExist:
|
||||
return False # don't notify if the app is not configured
|
||||
|
||||
return (
|
||||
mobile_app_user_settings.info_notifications_enabled # info notifications must be enabled
|
||||
and user.is_in_working_hours(now, mobile_app_user_settings.time_zone) # user must be in working hours
|
||||
and not _has_user_been_notified_for_shift_swap_request(shift_swap_request, user) # don't notify twice
|
||||
return user.is_in_working_hours( # user must be in working hours
|
||||
now, mobile_app_user_settings.time_zone
|
||||
) and not _has_user_been_notified_for_shift_swap_request( # don't notify twice
|
||||
shift_swap_request, user
|
||||
)
|
||||
|
||||
|
||||
def _mark_shift_swap_request_notified_for_user(shift_swap_request: ShiftSwapRequest, user: User) -> None:
|
||||
def _mark_shift_swap_request_notified_for_user(shift_swap_request: ShiftSwapRequest, user: User, timeout: int) -> None:
|
||||
key = _shift_swap_request_cache_key(shift_swap_request, user)
|
||||
cache.set(key, True, timeout=SSR_NOTIFICATION_WINDOW.total_seconds())
|
||||
cache.set(key, True, timeout=timeout)
|
||||
|
||||
|
||||
def _has_user_been_notified_for_shift_swap_request(shift_swap_request: ShiftSwapRequest, user: User) -> bool:
|
||||
|
|
|
|||
|
|
@@ -7,8 +7,6 @@ from firebase_admin.messaging import Message
|
|||
|
||||
from apps.mobile_app.models import FCMDevice, MobileAppUserSettings
|
||||
from apps.mobile_app.tasks import (
|
||||
SSR_EARLIEST_NOTIFICATION_OFFSET,
|
||||
SSR_NOTIFICATION_WINDOW,
|
||||
_get_shift_swap_requests_to_notify,
|
||||
_has_user_been_notified_for_shift_swap_request,
|
||||
_mark_shift_swap_request_notified_for_user,
|
||||
|
|
@@ -22,88 +20,70 @@ from apps.user_management.models import User
|
|||
from apps.user_management.models.user import default_working_hours
|
||||
|
||||
MICROSECOND = timezone.timedelta(microseconds=1)
|
||||
|
||||
|
||||
def test_window_more_than_24_hours():
|
||||
"""
|
||||
SSR_NOTIFICATION_WINDOW must be more than one week, otherwise it's not possible to guarantee that the
|
||||
notification will be sent according to users' working hours. For example, if user only works on Fridays 10am-2pm,
|
||||
and a shift swap request is created on Friday 3pm, we must wait for a whole week to send the notification.
|
||||
"""
|
||||
assert SSR_NOTIFICATION_WINDOW >= timezone.timedelta(weeks=1)
|
||||
TIMEOUT = 123
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_get_shift_swap_requests_to_notify_starts_soon(
|
||||
make_organization, make_user, make_schedule, make_shift_swap_request
|
||||
):
|
||||
def test_get_shift_swap_requests_to_notify(make_organization, make_user, make_schedule, make_shift_swap_request):
|
||||
organization = make_organization()
|
||||
user = make_user(organization=organization)
|
||||
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
|
||||
|
||||
now = timezone.now()
|
||||
swap_start = now + timezone.timedelta(days=10)
|
||||
swap_start = timezone.now()
|
||||
swap_end = swap_start + timezone.timedelta(days=1)
|
||||
|
||||
shift_swap_request = make_shift_swap_request(
|
||||
schedule, user, swap_start=swap_start, swap_end=swap_end, created_at=now
|
||||
schedule, user, swap_start=swap_start, swap_end=swap_end, created_at=swap_start - timezone.timedelta(days=27)
|
||||
)
|
||||
|
||||
assert _get_shift_swap_requests_to_notify(now - MICROSECOND) == []
|
||||
assert _get_shift_swap_requests_to_notify(now) == [shift_swap_request]
|
||||
assert _get_shift_swap_requests_to_notify(now + SSR_NOTIFICATION_WINDOW) == [shift_swap_request]
|
||||
assert _get_shift_swap_requests_to_notify(now + SSR_NOTIFICATION_WINDOW + MICROSECOND) == []
|
||||
def _timeout(**kwargs):
|
||||
return int(timezone.timedelta(**kwargs).total_seconds())
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_get_shift_swap_requests_to_notify_starts_very_soon(
|
||||
make_organization, make_user, make_schedule, make_shift_swap_request
|
||||
):
|
||||
organization = make_organization()
|
||||
user = make_user(organization=organization)
|
||||
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
|
||||
|
||||
now = timezone.now()
|
||||
swap_start = now + timezone.timedelta(minutes=1)
|
||||
swap_end = swap_start + timezone.timedelta(minutes=10)
|
||||
|
||||
shift_swap_request = make_shift_swap_request(
|
||||
schedule, user, swap_start=swap_start, swap_end=swap_end, created_at=now
|
||||
)
|
||||
|
||||
assert _get_shift_swap_requests_to_notify(now - MICROSECOND) == []
|
||||
assert _get_shift_swap_requests_to_notify(now) == [shift_swap_request]
|
||||
assert _get_shift_swap_requests_to_notify(now + timezone.timedelta(minutes=1)) == []
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_get_shift_swap_requests_to_notify_starts_not_soon(
|
||||
make_organization, make_user, make_schedule, make_shift_swap_request
|
||||
):
|
||||
organization = make_organization()
|
||||
user = make_user(organization=organization)
|
||||
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
|
||||
|
||||
now = timezone.now()
|
||||
swap_start = now + timezone.timedelta(days=100)
|
||||
swap_end = swap_start + timezone.timedelta(days=1)
|
||||
|
||||
shift_swap_request = make_shift_swap_request(
|
||||
schedule, user, swap_start=swap_start, swap_end=swap_end, created_at=now
|
||||
)
|
||||
|
||||
assert _get_shift_swap_requests_to_notify(now) == []
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - SSR_EARLIEST_NOTIFICATION_OFFSET - MICROSECOND) == []
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - SSR_EARLIEST_NOTIFICATION_OFFSET) == [shift_swap_request]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(days=28, microseconds=1)) == []
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(days=28)) == [
|
||||
(shift_swap_request, _timeout(days=7))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(days=27)) == [
|
||||
(shift_swap_request, _timeout(days=6))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(days=21)) == [
|
||||
(shift_swap_request, _timeout(days=7))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(days=14)) == [
|
||||
(shift_swap_request, _timeout(days=7))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(days=10)) == [
|
||||
(shift_swap_request, _timeout(days=3))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(days=7)) == [
|
||||
(shift_swap_request, _timeout(days=4))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(days=3)) == [
|
||||
(shift_swap_request, _timeout(days=1))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(days=2)) == [
|
||||
(shift_swap_request, _timeout(days=1))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(days=1)) == [
|
||||
(shift_swap_request, _timeout(hours=12))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(hours=18)) == [
|
||||
(shift_swap_request, _timeout(hours=6))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(hours=12)) == [
|
||||
(shift_swap_request, _timeout(hours=12))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(hours=11)) == [
|
||||
(shift_swap_request, _timeout(hours=11))
|
||||
]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start - timezone.timedelta(seconds=1)) == [
|
||||
(shift_swap_request, _timeout(seconds=1))
|
||||
]
|
||||
# check that the timeout is ceil-ed to the next second
|
||||
assert _get_shift_swap_requests_to_notify(
|
||||
swap_start - SSR_EARLIEST_NOTIFICATION_OFFSET + SSR_NOTIFICATION_WINDOW
|
||||
) == [shift_swap_request]
|
||||
assert (
|
||||
_get_shift_swap_requests_to_notify(
|
||||
swap_start - SSR_EARLIEST_NOTIFICATION_OFFSET + SSR_NOTIFICATION_WINDOW + MICROSECOND
|
||||
)
|
||||
== []
|
||||
)
|
||||
swap_start - timezone.timedelta(seconds=1) + timezone.timedelta(milliseconds=600)
|
||||
) == [(shift_swap_request, _timeout(seconds=1))]
|
||||
assert _get_shift_swap_requests_to_notify(swap_start) == []
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
|
@@ -123,12 +103,12 @@ def test_notify_shift_swap_requests(make_organization, make_user, make_schedule,
|
|||
with patch.object(notify_shift_swap_request, "delay") as mock_notify_shift_swap_request:
|
||||
with patch(
|
||||
"apps.mobile_app.tasks._get_shift_swap_requests_to_notify",
|
||||
return_value=ShiftSwapRequest.objects.filter(pk=shift_swap_request.pk),
|
||||
return_value=[(ShiftSwapRequest.objects.filter(pk=shift_swap_request.pk).first(), TIMEOUT)],
|
||||
) as mock_get_shift_swap_requests_to_notify:
|
||||
notify_shift_swap_requests()
|
||||
|
||||
mock_get_shift_swap_requests_to_notify.assert_called_once()
|
||||
mock_notify_shift_swap_request.assert_called_once_with(shift_swap_request.pk)
|
||||
mock_notify_shift_swap_request.assert_called_once_with(shift_swap_request.pk, TIMEOUT)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
|
@@ -153,7 +133,7 @@ def test_notify_shift_swap_request(make_organization, make_user, make_schedule,
|
|||
"possible_benefactors",
|
||||
new_callable=PropertyMock(return_value=User.objects.filter(pk=other_user.pk)),
|
||||
):
|
||||
notify_shift_swap_request(shift_swap_request.pk)
|
||||
notify_shift_swap_request(shift_swap_request.pk, TIMEOUT)
|
||||
|
||||
mock_notify_user_about_shift_swap_request.assert_called_once_with(shift_swap_request.pk, other_user.pk)
|
||||
|
||||
|
|
@@ -182,7 +162,7 @@ def test_notify_shift_swap_request_should_not_notify_user(
|
|||
"possible_benefactors",
|
||||
new_callable=PropertyMock(return_value=User.objects.filter(pk=other_user.pk)),
|
||||
):
|
||||
notify_shift_swap_request(shift_swap_request.pk)
|
||||
notify_shift_swap_request(shift_swap_request.pk, TIMEOUT)
|
||||
|
||||
mock_notify_user_about_shift_swap_request.assert_not_called()
|
||||
|
||||
|
|
@@ -230,7 +210,7 @@ def test_notify_shift_swap_request_success(
|
|||
)
|
||||
|
||||
with patch.object(notify_user_about_shift_swap_request, "delay") as mock_notify_user_about_shift_swap_request:
|
||||
notify_shift_swap_request(shift_swap_request.pk)
|
||||
notify_shift_swap_request(shift_swap_request.pk, TIMEOUT)
|
||||
|
||||
mock_notify_user_about_shift_swap_request.assert_called_once_with(shift_swap_request.pk, benefactor.pk)
|
||||
|
||||
|
|
@@ -245,7 +225,7 @@ def test_notify_user_about_shift_swap_request(make_organization, make_user, make
|
|||
device_to_notify = FCMDevice.objects.create(user=benefactor, registration_id="test_device_id")
|
||||
MobileAppUserSettings.objects.create(user=benefactor, info_notifications_enabled=True)
|
||||
|
||||
now = timezone.datetime(2023, 8, 1, 19, 38, tzinfo=timezone.utc)
|
||||
now = timezone.now()
|
||||
swap_start = now + timezone.timedelta(days=100)
|
||||
swap_end = swap_start + timezone.timedelta(days=1)
|
||||
|
||||
|
|
@@ -270,6 +250,32 @@ def test_notify_user_about_shift_swap_request(make_organization, make_user, make
|
|||
assert message.apns.payload.aps.sound.critical is False
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_notify_user_about_shift_swap_request_info_notifications_disabled(
|
||||
make_organization, make_user, make_schedule, make_shift_swap_request
|
||||
):
|
||||
organization = make_organization()
|
||||
beneficiary = make_user(organization=organization)
|
||||
benefactor = make_user(organization=organization)
|
||||
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
|
||||
|
||||
FCMDevice.objects.create(user=benefactor, registration_id="test_device_id")
|
||||
MobileAppUserSettings.objects.create(user=benefactor, info_notifications_enabled=False)
|
||||
|
||||
now = timezone.now()
|
||||
swap_start = now + timezone.timedelta(days=100)
|
||||
swap_end = swap_start + timezone.timedelta(days=1)
|
||||
|
||||
shift_swap_request = make_shift_swap_request(
|
||||
schedule, beneficiary, swap_start=swap_start, swap_end=swap_end, created_at=now
|
||||
)
|
||||
|
||||
with patch("apps.mobile_app.tasks._send_push_notification") as mock_send_push_notification:
|
||||
notify_user_about_shift_swap_request(shift_swap_request.pk, benefactor.pk)
|
||||
|
||||
mock_send_push_notification.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_should_notify_user(make_organization, make_user, make_schedule, make_shift_swap_request):
|
||||
organization = make_organization()
|
||||
|
|
@@ -288,8 +294,11 @@ def test_should_notify_user(make_organization, make_user, make_schedule, make_sh
|
|||
assert not MobileAppUserSettings.objects.exists()
|
||||
assert _should_notify_user_about_shift_swap_request(shift_swap_request, benefactor, now) is False
|
||||
|
||||
# check _should_notify_user_about_shift_swap_request is True when info notifications are disabled
|
||||
mobile_app_settings = MobileAppUserSettings.objects.create(user=benefactor, info_notifications_enabled=False)
|
||||
assert _should_notify_user_about_shift_swap_request(shift_swap_request, benefactor, now) is False
|
||||
with patch.object(benefactor, "is_in_working_hours", return_value=True):
|
||||
with patch("apps.mobile_app.tasks._has_user_been_notified_for_shift_swap_request", return_value=False):
|
||||
assert _should_notify_user_about_shift_swap_request(shift_swap_request, benefactor, now) is True
|
||||
|
||||
mobile_app_settings.info_notifications_enabled = True
|
||||
mobile_app_settings.save(update_fields=["info_notifications_enabled"])
|
||||
|
|
@@ -324,9 +333,9 @@ def test_mark_notified(make_organization, make_user, make_schedule, make_shift_s
|
|||
|
||||
cache.clear()
|
||||
assert _has_user_been_notified_for_shift_swap_request(shift_swap_request, benefactor) is False
|
||||
_mark_shift_swap_request_notified_for_user(shift_swap_request, benefactor)
|
||||
_mark_shift_swap_request_notified_for_user(shift_swap_request, benefactor, TIMEOUT)
|
||||
assert _has_user_been_notified_for_shift_swap_request(shift_swap_request, benefactor) is True
|
||||
|
||||
with patch.object(cache, "set") as mock_cache_set:
|
||||
_mark_shift_swap_request_notified_for_user(shift_swap_request, benefactor)
|
||||
assert mock_cache_set.call_args.kwargs["timeout"] == SSR_NOTIFICATION_WINDOW.total_seconds()
|
||||
_mark_shift_swap_request_notified_for_user(shift_swap_request, benefactor, TIMEOUT)
|
||||
assert mock_cache_set.call_args.kwargs["timeout"] == TIMEOUT
|
||||
|
|
413
engine/apps/public_api/tests/test_shift_swap.py
Normal file
@@ -0,0 +1,413 @@
|
|||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
from django.urls import reverse
|
||||
from django.utils import timezone
|
||||
from rest_framework import status
|
||||
from rest_framework.test import APIClient
|
||||
|
||||
from apps.schedules.models import CustomOnCallShift, OnCallScheduleWeb, ShiftSwapRequest
|
||||
from common.api_helpers.utils import serialize_datetime_as_utc_timestamp
|
||||
from common.insight_log import EntityEvent
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def setup_swap(make_user_for_organization, make_schedule, make_shift_swap_request):
|
||||
def _setup_swap(organization, **kwargs):
|
||||
user = make_user_for_organization(organization)
|
||||
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
|
||||
today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
tomorrow = today + timezone.timedelta(days=1)
|
||||
two_days_from_now = tomorrow + timezone.timedelta(days=1)
|
||||
|
||||
swap = make_shift_swap_request(schedule, user, swap_start=tomorrow, swap_end=two_days_from_now)
|
||||
return swap
|
||||
|
||||
return _setup_swap
|
||||
|
||||
|
||||
def assert_swap_response(response, request_data):
|
||||
response_data = response.json()
|
||||
swap = ShiftSwapRequest.objects.get(public_primary_key=response_data["id"])
|
||||
# check description
|
||||
assert swap.description == response_data["description"]
|
||||
if "description" in request_data:
|
||||
assert response_data["description"] == request_data["description"]
|
||||
# check datetime fields
|
||||
for field in ("swap_start", "swap_end"):
|
||||
db_value = serialize_datetime_as_utc_timestamp(getattr(swap, field))
|
||||
assert db_value == response_data[field]
|
||||
if field in request_data:
|
||||
assert db_value == request_data[field]
|
||||
# check FK fields
|
||||
for field in ("schedule", "beneficiary", "benefactor"):
|
||||
value = response_data[field]
|
||||
if value:
|
||||
assert getattr(swap, field).public_primary_key == response_data[field]
|
||||
else:
|
||||
assert getattr(swap, field) is None
|
||||
if field in request_data:
|
||||
assert value == request_data[field]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_list_filters(
|
||||
make_organization_and_user_with_token,
|
||||
make_user_for_organization,
|
||||
make_schedule,
|
||||
make_shift_swap_request,
|
||||
):
|
||||
organization, user, token = make_organization_and_user_with_token()
|
||||
user2 = make_user_for_organization(organization)
|
||||
|
||||
schedule1 = make_schedule(organization, schedule_class=OnCallScheduleWeb)
|
||||
schedule2 = make_schedule(organization, schedule_class=OnCallScheduleWeb)
|
||||
|
||||
today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
yesterday = today - timezone.timedelta(days=1)
|
||||
tomorrow = today + timezone.timedelta(days=1)
|
||||
two_days_from_now = tomorrow + timezone.timedelta(days=1)
|
||||
|
||||
# open
|
||||
swap1 = make_shift_swap_request(schedule1, user, swap_start=tomorrow, swap_end=two_days_from_now)
|
||||
# past due
|
||||
swap2 = make_shift_swap_request(schedule1, user2, swap_start=yesterday, swap_end=today)
|
||||
# past due / in-progress
|
||||
swap3 = make_shift_swap_request(schedule2, user2, swap_start=today, swap_end=tomorrow)
|
||||
# taken
|
||||
swap4 = make_shift_swap_request(schedule2, user2, swap_start=tomorrow, swap_end=two_days_from_now, benefactor=user)
|
||||
|
||||
def assert_expected(response, expected):
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
returned = [s["id"] for s in response.json().get("results", [])]
|
||||
assert returned == [s.public_primary_key for s in expected]
|
||||
|
||||
client = APIClient()
|
||||
base_url = reverse("api-public:shift_swap-list")
|
||||
|
||||
url = base_url
|
||||
response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert_expected(response, (swap1, swap4))
|
||||
|
||||
url = base_url + f"?schedule_id={schedule1.public_primary_key}"
|
||||
response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert_expected(response, (swap1,))
|
||||
|
||||
url = base_url + "?open_only=true"
|
||||
response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert_expected(response, (swap1,))
|
||||
|
||||
starting_after = serialize_datetime_as_utc_timestamp(yesterday)
|
||||
url = base_url + f"?beneficiary={user2.public_primary_key}&starting_after={starting_after}"
|
||||
response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert_expected(response, (swap2, swap3, swap4))
|
||||
|
||||
url = base_url + f"?benefactor={user.public_primary_key}"
|
||||
response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert_expected(response, (swap4,))
|
||||
|
||||
|
||||
@patch("apps.api.views.shift_swap.write_resource_insight_log")
|
||||
@patch("apps.api.views.shift_swap.create_shift_swap_request_message")
|
||||
@pytest.mark.django_db
|
||||
def test_create(
|
||||
mock_create_shift_swap_request_message,
|
||||
mock_write_resource_insight_log,
|
||||
make_organization_and_user_with_token,
|
||||
make_user_for_organization,
|
||||
make_schedule,
|
||||
):
|
||||
organization, user, token = make_organization_and_user_with_token()
|
||||
another_user = make_user_for_organization(organization)
|
||||
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
|
||||
today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
tomorrow = today + timezone.timedelta(days=1)
|
||||
two_days_from_now = tomorrow + timezone.timedelta(days=1)
|
||||
|
||||
data = {
|
||||
"schedule": schedule.public_primary_key,
|
||||
"description": "Taking a few days off",
|
||||
"swap_start": serialize_datetime_as_utc_timestamp(tomorrow),
|
||||
"swap_end": serialize_datetime_as_utc_timestamp(two_days_from_now),
|
||||
"beneficiary": another_user.public_primary_key,
|
||||
}
|
||||
|
||||
client = APIClient()
|
||||
url = reverse("api-public:shift_swap-list")
|
||||
response = client.post(url, data, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
|
||||
assert response.status_code == status.HTTP_201_CREATED
|
||||
assert_swap_response(response, data)
|
||||
|
||||
ssr = ShiftSwapRequest.objects.get(public_primary_key=response.json()["id"])
|
||||
mock_write_resource_insight_log.assert_called_once_with(instance=ssr, author=user, event=EntityEvent.CREATED)
|
||||
mock_create_shift_swap_request_message.apply_async.assert_called_once_with((ssr.pk,))
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_create_requires_beneficiary(
|
||||
make_organization_and_user_with_token,
|
||||
make_schedule,
|
||||
):
|
||||
organization, user, token = make_organization_and_user_with_token()
|
||||
|
||||
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
|
||||
today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
tomorrow = today + timezone.timedelta(days=1)
|
||||
two_days_from_now = tomorrow + timezone.timedelta(days=1)
|
||||
|
||||
data = {
|
||||
"schedule": schedule.public_primary_key,
|
||||
"description": "Taking a few days off",
|
||||
"swap_start": serialize_datetime_as_utc_timestamp(tomorrow),
|
||||
"swap_end": serialize_datetime_as_utc_timestamp(two_days_from_now),
|
||||
}
|
||||
|
||||
client = APIClient()
|
||||
url = reverse("api-public:shift_swap-list")
|
||||
response = client.post(url, data, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
assert ShiftSwapRequest.objects.count() == 0
|
||||
|
||||
|
||||
@patch("apps.api.views.shift_swap.write_resource_insight_log")
|
||||
@patch("apps.api.views.shift_swap.update_shift_swap_request_message")
|
||||
@pytest.mark.django_db
|
||||
def test_update(
|
||||
mock_update_shift_swap_request_message,
|
||||
mock_write_resource_insight_log,
|
||||
make_organization_and_user_with_token,
|
||||
setup_swap,
|
||||
):
|
||||
organization, user, token = make_organization_and_user_with_token()
|
||||
swap = setup_swap(organization)
|
||||
assert swap.description is None
|
||||
insights_log_prev_state = swap.insight_logs_serialized
|
||||
|
||||
data = {
|
||||
"description": "Taking a few days off",
|
||||
"schedule": swap.schedule.public_primary_key,
|
||||
"swap_start": serialize_datetime_as_utc_timestamp(swap.swap_start),
|
||||
"swap_end": serialize_datetime_as_utc_timestamp(swap.swap_end),
|
||||
}
|
||||
|
||||
client = APIClient()
|
||||
url = reverse("api-public:shift_swap-detail", kwargs={"pk": swap.public_primary_key})
|
||||
response = client.put(url, data, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert_swap_response(response, data)
|
||||
|
||||
swap.refresh_from_db()
|
||||
mock_write_resource_insight_log.assert_called_once_with(
|
||||
instance=swap,
|
||||
author=user,
|
||||
event=EntityEvent.UPDATED,
|
||||
prev_state=insights_log_prev_state,
|
||||
new_state=swap.insight_logs_serialized,
|
||||
)
|
||||
mock_update_shift_swap_request_message.apply_async.assert_called_once_with((swap.pk,))
|
||||
|
||||
|
||||
@patch("apps.api.views.shift_swap.write_resource_insight_log")
|
||||
@patch("apps.api.views.shift_swap.update_shift_swap_request_message")
|
||||
@pytest.mark.django_db
|
||||
def test_partial_update(
|
||||
mock_update_shift_swap_request_message,
|
||||
mock_write_resource_insight_log,
|
||||
make_organization_and_user_with_token,
|
||||
setup_swap,
|
||||
):
|
||||
organization, user, token = make_organization_and_user_with_token()
|
||||
swap = setup_swap(organization)
|
||||
assert swap.description is None
|
||||
insights_log_prev_state = swap.insight_logs_serialized
|
||||
|
||||
data = {"description": "Taking a few days off"}
|
||||
|
||||
client = APIClient()
|
||||
url = reverse("api-public:shift_swap-detail", kwargs={"pk": swap.public_primary_key})
|
||||
response = client.patch(url, data, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert_swap_response(response, data)
|
||||
|
||||
swap.refresh_from_db()
|
||||
mock_write_resource_insight_log.assert_called_once_with(
|
||||
instance=swap,
|
||||
author=user,
|
||||
event=EntityEvent.UPDATED,
|
||||
prev_state=insights_log_prev_state,
|
||||
new_state=swap.insight_logs_serialized,
|
||||
)
|
||||
mock_update_shift_swap_request_message.apply_async.assert_called_once_with((swap.pk,))
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_details(
|
||||
make_organization_and_user_with_token,
|
||||
make_on_call_shift,
|
||||
setup_swap,
|
||||
):
|
||||
organization, _, token = make_organization_and_user_with_token()
|
||||
swap = setup_swap(organization)
|
||||
schedule = swap.schedule
|
||||
user = swap.beneficiary
|
||||
|
||||
today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
start = today + timezone.timedelta(days=1)
|
||||
duration = timezone.timedelta(hours=8)
|
||||
data = {
|
||||
"start": start,
|
||||
"rotation_start": start,
|
||||
"duration": duration,
|
||||
"priority_level": 1,
|
||||
"frequency": CustomOnCallShift.FREQUENCY_DAILY,
|
||||
"schedule": schedule,
|
||||
}
|
||||
on_call_shift = make_on_call_shift(
|
||||
organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
|
||||
)
|
||||
on_call_shift.add_rolling_users([[user]])
|
||||
|
||||
client = APIClient()
|
||||
url = reverse("api-public:shift_swap-detail", kwargs={"pk": swap.public_primary_key})
|
||||
response = client.get(url, HTTP_AUTHORIZATION=f"{token}")
|
||||
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert_swap_response(response, {})
|
||||
|
||||
# include involved shifts information
|
||||
shifts_data = response.json()["shifts"]
|
||||
assert len(shifts_data) == 1
|
||||
expected = [
|
||||
# start, end, user, swap request ID
|
||||
(
|
||||
start.strftime("%Y-%m-%dT%H:%M:%SZ"),
|
||||
(start + duration).strftime("%Y-%m-%dT%H:%M:%SZ"),
|
||||
user.public_primary_key,
|
||||
swap.public_primary_key,
|
||||
),
|
||||
]
|
||||
returned_events = [
|
||||
(e["start"], e["end"], e["users"][0]["pk"], e["users"][0]["swap_request"]["pk"]) for e in shifts_data
|
||||
]
|
||||
assert returned_events == expected
|
||||
|
||||
|
||||
@patch("apps.api.views.shift_swap.write_resource_insight_log")
|
||||
@patch("apps.api.views.shift_swap.update_shift_swap_request_message")
|
||||
@pytest.mark.django_db
|
||||
def test_delete(
|
||||
mock_update_shift_swap_request_message,
|
||||
mock_write_resource_insight_log,
|
||||
make_organization_and_user_with_token,
|
||||
setup_swap,
|
||||
):
|
||||
organization, user, token = make_organization_and_user_with_token()
|
||||
swap = setup_swap(organization)
|
||||
|
||||
client = APIClient()
|
||||
url = reverse("api-public:shift_swap-detail", kwargs={"pk": swap.public_primary_key})
|
||||
|
||||
response = client.delete(url, HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||
|
||||
response = client.get(url, HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_404_NOT_FOUND
|
||||
|
||||
mock_write_resource_insight_log.assert_called_once_with(
|
||||
instance=swap,
|
||||
author=user,
|
||||
event=EntityEvent.DELETED,
|
||||
)
|
||||
mock_update_shift_swap_request_message.apply_async.assert_called_once_with((swap.pk,))
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_take(
|
||||
make_organization_and_user_with_token,
|
||||
make_user_for_organization,
|
||||
setup_swap,
|
||||
):
|
||||
organization, user, token = make_organization_and_user_with_token()
|
||||
another_user = make_user_for_organization(organization)
|
||||
swap = setup_swap(organization)
|
||||
|
||||
client = APIClient()
|
||||
url = reverse("api-public:shift_swap-take", kwargs={"pk": swap.public_primary_key})
|
||||
|
||||
data = {"benefactor": another_user.public_primary_key}
|
||||
response = client.post(url, data, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
|
||||
assert_swap_response(response, data)
|
||||
swap.refresh_from_db()
|
||||
assert swap.status == ShiftSwapRequest.Statuses.TAKEN
|
||||
assert swap.benefactor == another_user
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_take_requires_benefactor(
|
||||
make_organization_and_user_with_token,
|
||||
setup_swap,
|
||||
):
|
||||
organization, user, token = make_organization_and_user_with_token()
|
||||
swap = setup_swap(organization)
|
||||
|
||||
client = APIClient()
|
||||
url = reverse("api-public:shift_swap-take", kwargs={"pk": swap.public_primary_key})
|
||||
|
||||
data = {}
|
||||
response = client.post(url, data, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
|
||||
swap.refresh_from_db()
|
||||
assert swap.status == ShiftSwapRequest.Statuses.OPEN
|
||||
assert swap.benefactor is None
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_take_errors(
|
||||
make_organization_and_user_with_token,
|
||||
make_user_for_organization,
|
||||
setup_swap,
|
||||
):
|
||||
organization, user, token = make_organization_and_user_with_token()
|
||||
another_user = make_user_for_organization(organization)
|
||||
swap = setup_swap(organization)
|
||||
|
||||
client = APIClient()
|
||||
url = reverse("api-public:shift_swap-take", kwargs={"pk": swap.public_primary_key})
|
||||
|
||||
# same user taking the swap
|
||||
data = {"benefactor": swap.beneficiary.public_primary_key}
|
||||
response = client.post(url, data, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
|
||||
# already taken
|
||||
swap.take(another_user)
|
||||
data = {"benefactor": another_user.public_primary_key}
|
||||
response = client.post(url, data, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
|
||||
# deleted
|
||||
swap = setup_swap(organization)
|
||||
swap.delete()
|
||||
data = {"benefactor": another_user.public_primary_key}
|
||||
response = client.post(url, data, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
|
||||
# past due
|
||||
swap = setup_swap(organization)
|
||||
swap.swap_start = timezone.now() - timezone.timedelta(days=2)
|
||||
swap.save()
|
||||
data = {"benefactor": another_user.public_primary_key}
|
||||
response = client.post(url, data, format="json", HTTP_AUTHORIZATION=f"{token}")
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
|
|
@@ -25,6 +25,7 @@ router.register(r"actions", views.ActionView, basename="actions")
|
|||
router.register(r"user_groups", views.UserGroupView, basename="user_groups")
|
||||
router.register(r"on_call_shifts", views.CustomOnCallShiftView, basename="on_call_shifts")
|
||||
router.register(r"teams", views.TeamView, basename="teams")
|
||||
router.register(r"shift_swaps", views.ShiftSwapViewSet, basename="shift_swap")
|
||||
|
||||
|
||||
urlpatterns = [
|
||||
|
|
|
|||
|
|
@@ -12,6 +12,7 @@ from .phone_notifications import MakeCallView, SendSMSView  # noqa: F401
|
|||
from .resolution_notes import ResolutionNoteView # noqa: F401
|
||||
from .routes import ChannelFilterView # noqa: F401
|
||||
from .schedules import OnCallScheduleChannelView # noqa: F401
|
||||
from .shift_swap import ShiftSwapViewSet # noqa: F401
|
||||
from .slack_channels import SlackChannelView # noqa: F401
|
||||
from .teams import TeamView # noqa: F401
|
||||
from .user_groups import UserGroupView # noqa: F401
|
||||
|
|
|
97
engine/apps/public_api/views/shift_swap.py
Normal file
@@ -0,0 +1,97 @@
import logging

from django.utils import timezone
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.serializers import BaseSerializer

from apps.api.permissions import AuthenticatedRequest
from apps.api.views.shift_swap import BaseShiftSwapViewSet
from apps.auth_token.auth import ApiTokenAuthentication
from apps.public_api.throttlers.user_throttle import UserThrottle
from apps.schedules.models import ShiftSwapRequest
from apps.user_management.models import User
from common.api_helpers.custom_fields import TimeZoneAwareDatetimeField
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import RateLimitHeadersMixin

logger = logging.getLogger(__name__)


class ShiftSwapViewSet(RateLimitHeadersMixin, BaseShiftSwapViewSet):
    # set authentication and permission classes
    authentication_classes = (ApiTokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    # public API customizations
    throttle_classes = [UserThrottle]

    def get_queryset(self):
        schedule_id = self.request.query_params.get("schedule_id", None)
        beneficiary = self.request.query_params.get("beneficiary", None)
        benefactor = self.request.query_params.get("benefactor", None)
        starting_after = self.request.query_params.get("starting_after", None)
        open_only = self.request.query_params.get("open_only", "false") == "true"

        now = timezone.now()
        if starting_after:
            f = TimeZoneAwareDatetimeField()
            # trigger datetime format validation
            # will raise ValidationError if invalid timestamp is provided
            starting_after = f.to_internal_value(starting_after)
        else:
            starting_after = now

        # base queryset filters by organization
        queryset = super().get_queryset()
        queryset = queryset.filter(swap_start__gte=starting_after)

        if schedule_id:
            queryset = queryset.filter(schedule__public_primary_key=schedule_id)

        if beneficiary:
            queryset = queryset.filter(beneficiary__public_primary_key=beneficiary)

        if benefactor:
            queryset = queryset.filter(benefactor__public_primary_key=benefactor)

        if open_only:
            queryset = queryset.filter(benefactor__isnull=True, deleted_at__isnull=True, swap_start__gt=now)

        return queryset.order_by("swap_start")

    def get_object(self):
        public_primary_key = self.kwargs["pk"]
        try:
            return self.get_queryset().get(public_primary_key=public_primary_key)
        except ShiftSwapRequest.DoesNotExist:
            raise NotFound

    def _get_user(self, field_name: str):
        """Require and return user from ID given by field_name."""
        user_pk = self.request.data.pop(field_name, None)
        if not user_pk:
            raise BadRequest(detail=f"{field_name} user ID is required")
        try:
            user = User.objects.get(organization=self.request.auth.organization, public_primary_key=user_pk)
        except User.DoesNotExist:
            raise BadRequest(detail=f"Invalid {field_name} user ID")
        return user

    def perform_create(self, serializer: BaseSerializer[ShiftSwapRequest]) -> None:
        beneficiary = self._get_user("beneficiary")
        self._do_create(beneficiary=beneficiary, serializer=serializer)

    @action(methods=["post"], detail=True)
    def take(self, request: AuthenticatedRequest, pk: str) -> Response:
        # check the swap request exists and it's accessible
        self.get_object()
        benefactor = self._get_user("benefactor")
        serialized_shift_swap = self._do_take(benefactor=benefactor)
        return Response(serialized_shift_swap, status=status.HTTP_200_OK)
|
|
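For context, a rough client-side sketch of the public endpoints this viewset exposes. The base URL, token and IDs are placeholders; the query parameters, the paginated `results` key and the `benefactor` payload for `take` come from the view and tests above (the exact route, including the trailing slash, may differ):

```python
# Hypothetical usage sketch; not an official example.
import requests

API_BASE = "https://oncall.example.com/api/v1"  # placeholder base URL
HEADERS = {"Authorization": "<your API token>"}  # placeholder token

# List only open swap requests for one schedule.
resp = requests.get(
    f"{API_BASE}/shift_swaps/",
    params={"schedule_id": "S1234567890AB", "open_only": "true"},
    headers=HEADERS,
)
resp.raise_for_status()
swaps = resp.json()["results"]

# Take the first one on behalf of a user; "benefactor" is required.
if swaps:
    resp = requests.post(
        f"{API_BASE}/shift_swaps/{swaps[0]['id']}/take/",
        json={"benefactor": "U1234567890AB"},
        headers=HEADERS,
    )
    resp.raise_for_status()
```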
@@ -24,6 +24,8 @@ from apps.schedules.constants(
|
|||
ICAL_LOCATION,
|
||||
ICAL_RECURRENCE_ID,
|
||||
ICAL_SEQUENCE,
|
||||
ICAL_STATUS,
|
||||
ICAL_STATUS_CANCELLED,
|
||||
ICAL_SUMMARY,
|
||||
ICAL_UID,
|
||||
RE_EVENT_UID_V1,
|
||||
|
|
@@ -200,6 +202,10 @@ def get_shifts_dict(
|
|||
result_datetime = []
|
||||
result_date = []
|
||||
for event in events:
|
||||
status = event.get(ICAL_STATUS)
|
||||
if status == ICAL_STATUS_CANCELLED:
|
||||
# ignore cancelled events
|
||||
continue
|
||||
sequence = event.get(ICAL_SEQUENCE)
|
||||
recurrence_id = event.get(ICAL_RECURRENCE_ID)
|
||||
if recurrence_id:
|
||||
|
|
|
|||
|
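A small illustration of the cancelled-event handling added above, assuming the `icalendar` package and that `ICAL_STATUS`/`ICAL_STATUS_CANCELLED` map to the raw `STATUS`/`CANCELLED` property values (an assumption for this sketch):

```python
# Illustrative only: a VEVENT whose STATUS is CANCELLED is skipped by
# get_shifts_dict instead of producing a shift.
from icalendar import Event

event = Event()
event.add("SUMMARY", "on-call shift")
event.add("STATUS", "CANCELLED")

if event.get("STATUS") == "CANCELLED":
    pass  # the loop above does `continue`, so this event contributes no shift
```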
|
@@ -44,6 +44,9 @@ class ShiftSwapRequestManager(models.Manager):
|
|||
def hard_delete(self):
|
||||
return self.get_queryset().hard_delete()
|
||||
|
||||
def get_open_requests(self, now):
|
||||
return self.get_queryset().filter(benefactor__isnull=True, swap_start__gt=now)
|
||||
|
||||
|
||||
class ShiftSwapRequest(models.Model):
|
||||
beneficiary: "User"
|
||||
|
|
@@ -54,6 +57,18 @@ class ShiftSwapRequest(models.Model):
|
|||
objects: models.Manager["ShiftSwapRequest"] = ShiftSwapRequestManager()
|
||||
objects_with_deleted: models.Manager["ShiftSwapRequest"] = models.Manager()
|
||||
|
||||
FOLLOWUP_OFFSETS = [
|
||||
timezone.timedelta(weeks=4),
|
||||
timezone.timedelta(weeks=3),
|
||||
timezone.timedelta(weeks=2),
|
||||
timezone.timedelta(weeks=1),
|
||||
timezone.timedelta(days=3),
|
||||
timezone.timedelta(days=2),
|
||||
timezone.timedelta(days=1),
|
||||
timezone.timedelta(hours=12),
|
||||
]
|
||||
"""When to send followups before the swap start time"""
|
||||
|
||||
public_primary_key = models.CharField(
|
||||
max_length=20,
|
||||
validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
|
||||
|
|
|
|||
|
|
@@ -1 +1,2 @@
|
|||
from .slack_followups import send_shift_swap_request_slack_followups # noqa: F401
|
||||
from .slack_messages import create_shift_swap_request_message, update_shift_swap_request_message # noqa: F401
|
||||
|
|
|
80
engine/apps/schedules/tasks/shift_swaps/slack_followups.py
Normal file
@@ -0,0 +1,80 @@
|
|||
import datetime
|
||||
|
||||
from celery.utils.log import get_task_logger
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.utils import timezone
|
||||
|
||||
from apps.schedules.models import ShiftSwapRequest
|
||||
from apps.slack.scenarios.shift_swap_requests import ShiftSwapRequestFollowUp
|
||||
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
|
||||
|
||||
task_logger = get_task_logger(__name__)
|
||||
|
||||
# FOLLOWUP_WINDOW is used by _get_shift_swap_requests_in_followup_window and _mark_followup_sent to:
|
||||
# 1. Determine which SSRs to send followups for when the periodic task is run
|
||||
# 2. Prevent sending multiple followups for a single SSR in a short period
|
||||
FOLLOWUP_WINDOW = datetime.timedelta(hours=1)
|
||||
|
||||
|
||||
@shared_dedicated_queue_retry_task()
|
||||
def send_shift_swap_request_slack_followups() -> None:
|
||||
"""A periodic task to send Slack followups for shift swap requests."""
|
||||
|
||||
for shift_swap_request in _get_shift_swap_requests_in_followup_window(timezone.now()):
|
||||
if not _has_followup_been_sent(shift_swap_request):
|
||||
send_shift_swap_request_slack_followup.delay(shift_swap_request.pk)
|
||||
_mark_followup_sent(shift_swap_request)
|
||||
|
||||
|
||||
def _get_shift_swap_requests_in_followup_window(now: datetime.datetime) -> list[ShiftSwapRequest]:
|
||||
"""Get all SSRs that are in the followup window."""
|
||||
|
||||
shift_swap_requests_in_notification_window = []
|
||||
for shift_swap_request in ShiftSwapRequest.objects.get_open_requests(now):
|
||||
for offset in ShiftSwapRequest.FOLLOWUP_OFFSETS:
|
||||
notification_window_start = shift_swap_request.swap_start - offset
|
||||
notification_window_end = notification_window_start + FOLLOWUP_WINDOW
|
||||
|
||||
if notification_window_start <= now <= notification_window_end:
|
||||
shift_swap_requests_in_notification_window.append(shift_swap_request)
|
||||
break
|
||||
|
||||
return shift_swap_requests_in_notification_window
|
||||
|
||||
|
||||
@shared_dedicated_queue_retry_task(
|
||||
autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else 10
|
||||
)
|
||||
def send_shift_swap_request_slack_followup(shift_swap_request_pk: int) -> None:
|
||||
"""Send a Slack followup message for a particular SSR."""
|
||||
|
||||
try:
|
||||
shift_swap_request = ShiftSwapRequest.objects.get(pk=shift_swap_request_pk)
|
||||
except ShiftSwapRequest.DoesNotExist:
|
||||
task_logger.warning(f"ShiftSwapRequest {shift_swap_request_pk} does not exist")
|
||||
return
|
||||
|
||||
if shift_swap_request.slack_channel_id is None:
|
||||
task_logger.warning(f"ShiftSwapRequest {shift_swap_request_pk} does not have an associated Slack channel")
|
||||
return
|
||||
|
||||
task_logger.info(f"Sending Slack followup for ShiftSwapRequest {shift_swap_request_pk}")
|
||||
step = ShiftSwapRequestFollowUp(
|
||||
shift_swap_request.organization.slack_team_identity, shift_swap_request.organization
|
||||
)
|
||||
step.post_message(shift_swap_request)
|
||||
|
||||
|
||||
def _has_followup_been_sent(shift_swap_request: ShiftSwapRequest) -> bool:
|
||||
key = _followup_cache_key(shift_swap_request)
|
||||
return cache.get(key) is True
|
||||
|
||||
|
||||
def _mark_followup_sent(shift_swap_request: ShiftSwapRequest) -> None:
|
||||
key = _followup_cache_key(shift_swap_request)
|
||||
cache.set(key, True, timeout=FOLLOWUP_WINDOW.total_seconds())
|
||||
|
||||
|
||||
def _followup_cache_key(shift_swap_request: ShiftSwapRequest) -> str:
|
||||
return f"ssr_slack_followup:{shift_swap_request.pk}"
|
||||
|
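One operational note on the task above: `send_shift_swap_request_slack_followups` only catches a follow-up if it runs while `now` falls inside the one-hour `FOLLOWUP_WINDOW`, so it has to be scheduled at least that often. A hedged sketch of what a Celery beat entry could look like; the app object and cadence are assumptions, only the task's module path comes from the diff:

```python
# Illustrative beat schedule; the real project wires its own Celery app and cadence.
from celery import Celery
from celery.schedules import crontab

app = Celery("example")  # stand-in for the project's Celery app
app.conf.beat_schedule = {
    "send-shift-swap-request-slack-followups": {
        "task": "apps.schedules.tasks.shift_swaps.slack_followups.send_shift_swap_request_slack_followups",
        "schedule": crontab(minute="*/10"),  # assumed cadence, comfortably under FOLLOWUP_WINDOW
    },
}
```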
|
@@ -0,0 +1,178 @@
|
|||
import datetime
|
||||
from unittest.mock import ANY, patch
|
||||
|
||||
import pytest
|
||||
from django.core.cache import cache
|
||||
from django.utils import timezone
|
||||
|
||||
from apps.schedules.models import OnCallScheduleWeb, ShiftSwapRequest
|
||||
from apps.schedules.tasks.shift_swaps import send_shift_swap_request_slack_followups
|
||||
from apps.schedules.tasks.shift_swaps.slack_followups import (
|
||||
FOLLOWUP_WINDOW,
|
||||
_get_shift_swap_requests_in_followup_window,
|
||||
_mark_followup_sent,
|
||||
    send_shift_swap_request_slack_followup,
)


@pytest.fixture
def shift_swap_request_test_setup(
    make_organization_with_slack_team_identity,
    make_user,
    make_schedule,
    make_slack_channel,
    make_slack_message,
    make_shift_swap_request,
):
    def _shift_swap_request_test_setup(swap_start=None, swap_end=None, **kwargs):
        organization, slack_team_identity = make_organization_with_slack_team_identity()
        user = make_user(organization=organization)

        slack_channel = make_slack_channel(slack_team_identity)
        slack_message = make_slack_message(alert_group=None, organization=organization, slack_id="12345")

        schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb, channel=slack_channel.slack_id)

        if swap_start is None:
            swap_start = timezone.now() + timezone.timedelta(days=7)

        if swap_end is None:
            swap_end = swap_start + timezone.timedelta(days=1)

        shift_swap_request = make_shift_swap_request(
            schedule, user, swap_start=swap_start, swap_end=swap_end, slack_message=slack_message, **kwargs
        )
        return shift_swap_request

    return _shift_swap_request_test_setup


@patch.object(send_shift_swap_request_slack_followup, "delay")
@patch("apps.schedules.tasks.shift_swaps.slack_followups._mark_followup_sent")
@patch("apps.schedules.tasks.shift_swaps.slack_followups._has_followup_been_sent", return_value=False)
@pytest.mark.django_db
def test_send_shift_swap_request_followups(
    mock_has_followup_been_sent,
    mock_mark_followup_sent,
    mock_send_shift_swap_request_followup,
    shift_swap_request_test_setup,
):
    shift_swap_request = shift_swap_request_test_setup()

    with patch(
        "apps.schedules.tasks.shift_swaps.slack_followups._get_shift_swap_requests_in_followup_window",
        return_value=[shift_swap_request],
    ) as mock_get_shift_swap_requests_in_followup_window:
        send_shift_swap_request_slack_followups()

    mock_get_shift_swap_requests_in_followup_window.assert_called_once()
    mock_has_followup_been_sent.assert_called_once_with(shift_swap_request)
    mock_mark_followup_sent.assert_called_once_with(shift_swap_request)
    mock_send_shift_swap_request_followup.assert_called_once_with(shift_swap_request.pk)


@patch.object(send_shift_swap_request_slack_followup, "delay")
@patch("apps.schedules.tasks.shift_swaps.slack_followups._mark_followup_sent")
@patch("apps.schedules.tasks.shift_swaps.slack_followups._has_followup_been_sent", return_value=True)
@pytest.mark.django_db
def test_send_shift_swap_request_followups_already_sent(
    mock_has_followup_been_sent,
    mock_mark_followup_sent,
    mock_send_shift_swap_request_followup,
    shift_swap_request_test_setup,
):
    shift_swap_request = shift_swap_request_test_setup()

    with patch(
        "apps.schedules.tasks.shift_swaps.slack_followups._get_shift_swap_requests_in_followup_window",
        return_value=[shift_swap_request],
    ) as mock_get_shift_swap_requests_in_followup_window:
        send_shift_swap_request_slack_followups()

    mock_get_shift_swap_requests_in_followup_window.assert_called_once()
    mock_has_followup_been_sent.assert_called_once_with(shift_swap_request)
    mock_mark_followup_sent.assert_not_called()
    mock_send_shift_swap_request_followup.assert_not_called()


@pytest.mark.django_db
def test_get_shift_swap_requests_in_followup_window(shift_swap_request_test_setup):
    now = timezone.now()
    swap_start = now + timezone.timedelta(days=7)
    swap_end = swap_start + timezone.timedelta(days=1)
    shift_swap_request = shift_swap_request_test_setup(swap_start=swap_start, swap_end=swap_end)

    for offset in ShiftSwapRequest.FOLLOWUP_OFFSETS:
        # not yet
        assert (
            _get_shift_swap_requests_in_followup_window(swap_start - offset - datetime.timedelta(microseconds=1)) == []
        )

        # now
        assert _get_shift_swap_requests_in_followup_window(swap_start - offset) == [shift_swap_request]

        # in the window
        assert _get_shift_swap_requests_in_followup_window(swap_start - offset + FOLLOWUP_WINDOW // 2) == [
            shift_swap_request
        ]
        assert _get_shift_swap_requests_in_followup_window(swap_start - offset + FOLLOWUP_WINDOW) == [
            shift_swap_request
        ]

        # too late
        assert (
            _get_shift_swap_requests_in_followup_window(
                swap_start - offset + FOLLOWUP_WINDOW + datetime.timedelta(microseconds=1)
            )
            == []
        )
@pytest.mark.django_db
def test_get_shift_swap_requests_in_followup_not_open(shift_swap_request_test_setup, make_user):
    now = timezone.now()
    swap_start = now + timezone.timedelta(days=7)
    swap_end = swap_start + timezone.timedelta(days=1)

    # open
    ssr_open = shift_swap_request_test_setup(swap_start=swap_start, swap_end=swap_end)
    # taken
    shift_swap_request_test_setup(
        swap_start=swap_start, swap_end=swap_end, benefactor=make_user(organization=ssr_open.schedule.organization)
    )
    # deleted
    shift_swap_request_test_setup(swap_start=swap_start, swap_end=swap_end, deleted_at=now)
    # past due
    shift_swap_request_test_setup(swap_start=now - timezone.timedelta(days=1))

    assert _get_shift_swap_requests_in_followup_window(now) == [ssr_open]


def test_followup_offsets():
    for idx in range(1, len(ShiftSwapRequest.FOLLOWUP_OFFSETS)):
        assert ShiftSwapRequest.FOLLOWUP_OFFSETS[idx - 1] - ShiftSwapRequest.FOLLOWUP_OFFSETS[idx] > FOLLOWUP_WINDOW
        assert ShiftSwapRequest.FOLLOWUP_OFFSETS[idx] > FOLLOWUP_WINDOW
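The window assertions above pin down the contract for `_get_shift_swap_requests_in_followup_window`: for each offset in `ShiftSwapRequest.FOLLOWUP_OFFSETS`, a request becomes due at `swap_start - offset` and stays due until `swap_start - offset + FOLLOWUP_WINDOW`. A minimal sketch of that predicate for a single request, with made-up offset and window values (the real constants live on the model and in `slack_followups`):

```python
import datetime

# Illustrative values only; the real constants are ShiftSwapRequest.FOLLOWUP_OFFSETS
# and slack_followups.FOLLOWUP_WINDOW.
FOLLOWUP_WINDOW = datetime.timedelta(minutes=10)
FOLLOWUP_OFFSETS = [
    datetime.timedelta(weeks=4),
    datetime.timedelta(weeks=2),
    datetime.timedelta(weeks=1),
    datetime.timedelta(days=3),
    datetime.timedelta(days=1),
    datetime.timedelta(hours=12),
]


def is_in_followup_window(now: datetime.datetime, swap_start: datetime.datetime) -> bool:
    """Return True if `now` falls inside any follow-up window preceding `swap_start`."""
    for offset in FOLLOWUP_OFFSETS:
        window_start = swap_start - offset
        if window_start <= now <= window_start + FOLLOWUP_WINDOW:
            return True
    return False
```

These illustrative offsets also satisfy the invariants checked by `test_followup_offsets`: consecutive offsets are spaced more than one window apart, and every offset is larger than the window itself.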
@patch("apps.slack.slack_client.SlackClientWithErrorHandling.api_call")
|
||||
@pytest.mark.django_db
|
||||
def test_send_shift_swap_request_followup(mock_slack_api_call, shift_swap_request_test_setup):
|
||||
shift_swap_request = shift_swap_request_test_setup()
|
||||
send_shift_swap_request_slack_followup(shift_swap_request.pk)
|
||||
|
||||
mock_slack_api_call.assert_called_once_with(
|
||||
"chat.postMessage",
|
||||
channel=shift_swap_request.slack_message.channel_id,
|
||||
thread_ts=shift_swap_request.slack_message.slack_id,
|
||||
reply_broadcast=True,
|
||||
blocks=ANY,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_mark_followup_sent(shift_swap_request_test_setup):
|
||||
shift_swap_request = shift_swap_request_test_setup()
|
||||
|
||||
with patch.object(cache, "set") as mock_cache_set:
|
||||
_mark_followup_sent(shift_swap_request)
|
||||
assert mock_cache_set.call_args.kwargs["timeout"] == FOLLOWUP_WINDOW.total_seconds()
|
||||
|
|
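`test_mark_followup_sent` only checks the cache timeout, but it implies the dedup mechanism: a per-request cache entry that expires together with the follow-up window, so the next window can trigger a fresh follow-up. A rough sketch of how `_mark_followup_sent` and `_has_followup_been_sent` could be built on Django's cache; the key format and window value are assumptions, not the actual helpers from `slack_followups`:

```python
import datetime

from django.core.cache import cache

FOLLOWUP_WINDOW = datetime.timedelta(minutes=10)  # assumed value for illustration


def _followup_cache_key(shift_swap_request) -> str:
    # Assumed key format; the real helper may use a different name.
    return f"ssr_followup_sent_{shift_swap_request.pk}"


def _mark_followup_sent(shift_swap_request) -> None:
    # Expire together with the follow-up window so a later window can fire again.
    cache.set(_followup_cache_key(shift_swap_request), True, timeout=FOLLOWUP_WINDOW.total_seconds())


def _has_followup_been_sent(shift_swap_request) -> bool:
    return cache.get(_followup_cache_key(shift_swap_request)) is not None
```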
@ -16,7 +16,13 @@ from apps.schedules.ical_utils import (
    parse_event_uid,
    users_in_ical,
)
from apps.schedules.models import CustomOnCallShift, OnCallSchedule, OnCallScheduleCalendar, OnCallScheduleWeb
from apps.schedules.models import (
    CustomOnCallShift,
    OnCallSchedule,
    OnCallScheduleCalendar,
    OnCallScheduleICal,
    OnCallScheduleWeb,
)


def test_get_icalendar_tz_or_utc():
@ -122,6 +128,38 @@ def test_list_users_to_notify_from_ical_viewers_inclusion(
    assert set(users_on_call) == {user}


@pytest.mark.django_db
def test_list_users_to_notify_from_ical_ignore_cancelled(make_organization_and_user, make_schedule):
    organization, user = make_organization_and_user()
    now = timezone.now().replace(second=0, microsecond=0)
    end = now + timezone.timedelta(minutes=30)
    ical_data = textwrap.dedent(
        """
        BEGIN:VCALENDAR
        VERSION:2.0
        CALSCALE:GREGORIAN
        METHOD:PUBLISH
        BEGIN:VEVENT
        SUMMARY:{}
        DTSTART;VALUE=DATE-TIME:{}
        DTEND;VALUE=DATE-TIME:{}
        DTSTAMP;VALUE=DATE-TIME:20230807T001508Z
        UID:some-uid
        LOCATION:primary
        STATUS:CANCELLED
        END:VEVENT
        END:VCALENDAR
        """.format(
            user.username, now.strftime("%Y%m%dT%H%M%SZ"), end.strftime("%Y%m%dT%H%M%SZ")
        )
    )
    schedule = make_schedule(organization, schedule_class=OnCallScheduleICal, cached_ical_file_primary=ical_data)

    # get users on-call
    users_on_call = list_users_to_notify_from_ical(schedule, now + timezone.timedelta(minutes=5))
    assert len(users_on_call) == 0


@pytest.mark.django_db
def test_list_users_to_notify_from_ical_until_terminated_event(
    make_organization_and_user, make_user_for_organization, make_schedule, make_on_call_shift
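The new test relies on shift calculation skipping VEVENTs whose STATUS is CANCELLED. As a standalone illustration with the `icalendar` package, filtering such events out of a feed looks roughly like the helper below; the real filtering happens inside `apps.schedules.ical_utils`, not in a function of this shape:

```python
from icalendar import Calendar


def non_cancelled_events(ical_data: str):
    """Yield VEVENT components from an iCal feed, skipping STATUS:CANCELLED ones."""
    calendar = Calendar.from_ical(ical_data)
    for component in calendar.walk("VEVENT"):
        if str(component.get("STATUS", "")).upper() == "CANCELLED":
            continue
        yield component
```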
@ -2,6 +2,9 @@ import json
import logging
import typing

import humanize
from django.utils import timezone

from apps.slack.constants import DIVIDER
from apps.slack.models import SlackMessage
from apps.slack.scenarios import scenario_step
@ -209,6 +212,38 @@ class AcceptShiftSwapRequestStep(BaseShiftSwapRequestStep):
        self.update_message(shift_swap_request)


class ShiftSwapRequestFollowUp(scenario_step.ScenarioStep):
    @staticmethod
    def _generate_blocks(shift_swap_request: "ShiftSwapRequest") -> Block.AnyBlocks:
        # Time until shift swap starts (example: "14 days", "2 hours")
        delta = humanize.naturaldelta(timezone.now() - shift_swap_request.swap_start)

        return [
            typing.cast(
                Block.Section,
                {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": (
                            f":exclamation: This shift swap request is still open and will start in {delta}.\n"
                            "Jump back into the thread and accept it if you're available!"
                        ),
                    },
                },
            )
        ]

    def post_message(self, shift_swap_request: "ShiftSwapRequest") -> None:
        self._slack_client.api_call(
            "chat.postMessage",
            channel=shift_swap_request.slack_message.channel_id,
            thread_ts=shift_swap_request.slack_message.slack_id,
            reply_broadcast=True,
            blocks=self._generate_blocks(shift_swap_request),
        )


STEPS_ROUTING: ScenarioRoute.RoutingSteps = [
    {
        "payload_type": PayloadType.BLOCK_ACTIONS,
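`_generate_blocks` leans on `humanize.naturaldelta` to phrase the time until `swap_start`. `naturaldelta` works on the absolute value of the timedelta, so `timezone.now() - swap_start` and `swap_start - timezone.now()` read the same. A quick illustration with made-up values:

```python
import datetime

import humanize

delta = datetime.timedelta(days=14, hours=3)
print(humanize.naturaldelta(delta))   # "14 days"
print(humanize.naturaldelta(-delta))  # "14 days" as well; the sign is ignored
```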
@ -22,67 +22,89 @@ acknowledge_condition = None

# Web
web_title = """\
{%- set groupLabels = payload.groupLabels.copy() -%}
{%- set alertname = groupLabels.pop('alertname') | default("") -%}
{% set groupLabels = payload.get("groupLabels", {}).copy() -%}
{% if "labels" in payload -%}
{# backward compatibility with legacy alertmanager integration -#}
{% set alertname = payload.get("labels", {}).get("alertname", "") -%}
{% else -%}
{% set alertname = groupLabels.pop("alertname", "") -%}
{% endif -%}

[{{ payload.status }}{% if payload.status == 'firing' %}:{{ payload.numFiring }}{% endif %}] {{ alertname }} {% if groupLabels | length > 0 %}({{ groupLabels|join(", ") }}){% endif %}
[{{ payload.status }}{% if payload.status == 'firing' and payload.numFiring %}:{{ payload.numFiring }}{% endif %}] {{ alertname }} {% if groupLabels | length > 0 %}({{ groupLabels.values()|join(", ") }}){% endif %}
""" # noqa

web_message = """\
{%- set annotations = payload.commonAnnotations.copy() -%}
{% set annotations = payload.get("commonAnnotations", {}).copy() -%}
{% set groupLabels = payload.get("groupLabels", {}) -%}
{% set commonLabels = payload.get("commonLabels", {}) -%}
{% set severity = groupLabels.severity -%}
{% set legacyLabels = payload.get("labels", {}) -%}
{% set legacyAnnotations = payload.get("annotations", {}) -%}

{% set severity = payload.groupLabels.severity -%}
{% if severity %}
{%- set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
{% if severity -%}
{% set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
Severity: {{ severity }} {{ severity_emoji }}
{% endif %}
{% endif -%}

{%- set status = payload.status | default("Unknown") %}
{%- set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") %}
{% set status = payload.get("status", "Unknown") -%}
{% set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") -%}
Status: {{ status }} {{ status_emoji }} (on the source)
{% if status == "firing" %}
{% if status == "firing" and payload.numFiring -%}
Firing alerts – {{ payload.numFiring }}
Resolved alerts – {{ payload.numResolved }}
{% endif %}
{% endif -%}

{% if "runbook_url" in annotations -%}
[:book: Runbook:link:]({{ annotations.runbook_url }})
{%- set _ = annotations.pop('runbook_url') -%}
{%- endif %}
{% set _ = annotations.pop('runbook_url') -%}
{% endif -%}

{%- if "runbook_url_internal" in annotations -%}
{% if "runbook_url_internal" in annotations -%}
[:closed_book: Runbook (internal):link:]({{ annotations.runbook_url_internal }})
{%- set _ = annotations.pop('runbook_url_internal') -%}
{%- endif %}

GroupLabels:
{%- for k, v in payload["groupLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}

{% if payload["commonLabels"] | length > 0 -%}
CommonLabels:
{%- for k, v in payload["commonLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}
{% set _ = annotations.pop('runbook_url_internal') -%}
{% endif %}

{%- if groupLabels | length > 0 %}
GroupLabels:
{% for k, v in groupLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if commonLabels | length > 0 -%}
CommonLabels:
{% for k, v in commonLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if annotations | length > 0 -%}
Annotations:
{%- for k, v in annotations.items() %}
{% for k, v in annotations.items() -%}
- {{ k }}: {{ v }}
{%- endfor %}
{% endif %}
{% endfor %}
{% endif -%}

{# backward compatibility with legacy alertmanager integration -#}
{% if legacyLabels | length > 0 -%}
Labels:
{% for k, v in legacyLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if legacyAnnotations | length > 0 -%}
Annotations:
{% for k, v in legacyAnnotations.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}
[View in AlertManager]({{ source_link }})
"""


# Slack
slack_title = """\
{%- set groupLabels = payload.groupLabels.copy() -%}
{%- set alertname = groupLabels.pop('alertname') | default("") -%}
*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ web_title }}>* via {{ integration_name }}
{% if source_link %}
(*<{{ source_link }}|source>*)
@ -99,50 +121,71 @@ slack_title = """\
# """

slack_message = """\
{%- set annotations = payload.commonAnnotations.copy() -%}
{% set annotations = payload.get("commonAnnotations", {}).copy() -%}
{% set groupLabels = payload.get("groupLabels", {}) -%}
{% set commonLabels = payload.get("commonLabels", {}) -%}
{% set severity = groupLabels.severity -%}
{% set legacyLabels = payload.get("labels", {}) -%}
{% set legacyAnnotations = payload.get("annotations", {}) -%}

{% set severity = payload.groupLabels.severity -%}
{% if severity %}
{%- set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
{% if severity -%}
{% set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
Severity: {{ severity }} {{ severity_emoji }}
{% endif %}
{% endif -%}

{%- set status = payload.status | default("Unknown") %}
{%- set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") %}
{% set status = payload.get("status", "Unknown") -%}
{% set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") -%}
Status: {{ status }} {{ status_emoji }} (on the source)
{% if status == "firing" %}
{% if status == "firing" and payload.numFiring -%}
Firing alerts – {{ payload.numFiring }}
Resolved alerts – {{ payload.numResolved }}
{% endif %}
{% endif -%}

{% if "runbook_url" in annotations -%}
<{{ annotations.runbook_url }}|:book: Runbook:link:>
{%- set _ = annotations.pop('runbook_url') -%}
{%- endif %}
{% set _ = annotations.pop('runbook_url') -%}
{% endif -%}

{%- if "runbook_url_internal" in annotations -%}
{% if "runbook_url_internal" in annotations -%}
<{{ annotations.runbook_url_internal }}|:closed_book: Runbook (internal):link:>
{%- set _ = annotations.pop('runbook_url_internal') -%}
{%- endif %}

GroupLabels:
{%- for k, v in payload["groupLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}

{% if payload["commonLabels"] | length > 0 -%}
CommonLabels:
{%- for k, v in payload["commonLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}
{% set _ = annotations.pop('runbook_url_internal') -%}
{% endif %}

{%- if groupLabels | length > 0 %}
GroupLabels:
{% for k, v in groupLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if commonLabels | length > 0 -%}
CommonLabels:
{% for k, v in commonLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if annotations | length > 0 -%}
Annotations:
{%- for k, v in annotations.items() %}
{% for k, v in annotations.items() -%}
- {{ k }}: {{ v }}
{%- endfor %}
{% endif %}
{% endfor %}
{% endif -%}

{# backward compatibility with legacy alertmanager integration -#}
{% if legacyLabels | length > 0 -%}
Labels:
{% for k, v in legacyLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if legacyAnnotations | length > 0 -%}
Annotations:
{% for k, v in legacyAnnotations.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}
"""
# noqa: W291
@ -155,57 +198,77 @@ web_image_url = None
sms_title = web_title

# Phone
phone_call_title = """{{ payload.groupLabels|join(", ") }}"""
phone_call_title = """{{ payload.get("groupLabels", {}).values() |join(", ") }}"""

# Telegram
telegram_title = web_title

telegram_message = """\
{%- set annotations = payload.commonAnnotations.copy() -%}
{% set annotations = payload.get("commonAnnotations", {}).copy() -%}
{% set groupLabels = payload.get("groupLabels", {}) -%}
{% set commonLabels = payload.get("commonLabels", {}) -%}
{% set severity = groupLabels.severity -%}
{% set legacyLabels = payload.get("labels", {}) -%}
{% set legacyAnnotations = payload.get("annotations", {}) -%}

{% set severity = payload.groupLabels.severity -%}
{% if severity %}
{%- set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
{% if severity -%}
{% set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
Severity: {{ severity }} {{ severity_emoji }}
{% endif %}
{% endif -%}

{%- set status = payload.status | default("Unknown") %}
{%- set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") %}
{% set status = payload.get("status", "Unknown") -%}
{% set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") -%}
Status: {{ status }} {{ status_emoji }} (on the source)
{% if status == "firing" %}
{% if status == "firing" and payload.numFiring -%}
Firing alerts – {{ payload.numFiring }}
Resolved alerts – {{ payload.numResolved }}
{% endif %}
{% endif -%}

{% if "runbook_url" in annotations -%}
<a href='{{ annotations.runbook_url }}'>:book: Runbook:link:</a>
{%- set _ = annotations.pop('runbook_url') -%}
{%- endif %}
{% set _ = annotations.pop('runbook_url') -%}
{% endif -%}

{%- if "runbook_url_internal" in annotations -%}
{% if "runbook_url_internal" in annotations -%}
<a href='{{ annotations.runbook_url_internal }}'>:closed_book: Runbook (internal):link:</a>
{%- set _ = annotations.pop('runbook_url_internal') -%}
{%- endif %}

GroupLabels:
{%- for k, v in payload["groupLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}

{% if payload["commonLabels"] | length > 0 -%}
CommonLabels:
{%- for k, v in payload["commonLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}
{% set _ = annotations.pop('runbook_url_internal') -%}
{% endif %}

{%- if groupLabels | length > 0 %}
GroupLabels:
{% for k, v in groupLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if commonLabels | length > 0 -%}
CommonLabels:
{% for k, v in commonLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if annotations | length > 0 -%}
Annotations:
{%- for k, v in annotations.items() %}
{% for k, v in annotations.items() -%}
- {{ k }}: {{ v }}
{%- endfor %}
{% endif %}
{% endfor %}
{% endif -%}

{# backward compatibility with legacy alertmanager integration -#}
{% if legacyLabels | length > 0 -%}
Labels:
{% for k, v in legacyLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if legacyAnnotations | length > 0 -%}
Annotations:
{% for k, v in legacyAnnotations.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}
<a href='{{ source_link }}'>View in AlertManager</a>
"""
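As a sanity check, the reworked `web_title` template can be rendered outside OnCall with plain Jinja2 against a trimmed AlertManager-style payload. The payload below is made up, and the template is re-declared inline (minus the backward-compatibility comment) rather than imported from the integration config:

```python
from jinja2 import Environment

WEB_TITLE = """\
{% set groupLabels = payload.get("groupLabels", {}).copy() -%}
{% if "labels" in payload -%}
{% set alertname = payload.get("labels", {}).get("alertname", "") -%}
{% else -%}
{% set alertname = groupLabels.pop("alertname", "") -%}
{% endif -%}
[{{ payload.status }}{% if payload.status == 'firing' and payload.numFiring %}:{{ payload.numFiring }}{% endif %}] {{ alertname }} {% if groupLabels | length > 0 %}({{ groupLabels.values()|join(", ") }}){% endif %}
"""

# Made-up AlertManager-style payload for illustration.
payload = {
    "status": "firing",
    "numFiring": 2,
    "groupLabels": {"alertname": "InstanceDown", "cluster": "prod"},
}

print(Environment().from_string(WEB_TITLE).render(payload=payload))
# [firing:2] InstanceDown (prod)
```

Because the template now uses `payload.get(...)` and guards on `payload.numFiring`, the same render also works when `groupLabels` or `numFiring` are missing, which is the point of the change for legacy payloads.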
@ -11,6 +11,7 @@ is_featured = True
featured_tag_name = "Quick Connect"
is_able_to_autoresolve = True
is_demo_alert_enabled = True
based_on_alertmanager = True


# Behaviour
@ -22,69 +23,91 @@ resolve_condition = """{{ payload.status == "resolved" }}"""
acknowledge_condition = None


# Web
web_title = """\
{%- set groupLabels = payload.groupLabels.copy() -%}
{%- set alertname = groupLabels.pop('alertname') | default("") -%}
{% set groupLabels = payload.get("groupLabels", {}).copy() -%}
{% if "labels" in payload -%}
{# backward compatibility with legacy alertmanager integration -#}
{% set alertname = payload.get("labels", {}).get("alertname", "") -%}
{% else -%}
{% set alertname = groupLabels.pop("alertname", "") -%}
{% endif -%}

[{{ payload.status }}{% if payload.status == 'firing' %}:{{ payload.numFiring }}{% endif %}] {{ alertname }} {% if groupLabels | length > 0 %}({{ groupLabels|join(", ") }}){% endif %}
[{{ payload.status }}{% if payload.status == 'firing' and payload.numFiring %}:{{ payload.numFiring }}{% endif %}] {{ alertname }} {% if groupLabels | length > 0 %}({{ groupLabels.values()|join(", ") }}){% endif %}
""" # noqa

web_message = """\
{%- set annotations = payload.commonAnnotations.copy() -%}
{% set annotations = payload.get("commonAnnotations", {}).copy() -%}
{% set groupLabels = payload.get("groupLabels", {}) -%}
{% set commonLabels = payload.get("commonLabels", {}) -%}
{% set severity = groupLabels.severity -%}
{% set legacyLabels = payload.get("labels", {}) -%}
{% set legacyAnnotations = payload.get("annotations", {}) -%}

{% set severity = payload.groupLabels.severity -%}
{% if severity %}
{%- set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
{% if severity -%}
{% set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
Severity: {{ severity }} {{ severity_emoji }}
{% endif %}
{% endif -%}

{%- set status = payload.status | default("Unknown") %}
{%- set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") %}
{% set status = payload.get("status", "Unknown") -%}
{% set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") -%}
Status: {{ status }} {{ status_emoji }} (on the source)
{% if status == "firing" %}
{% if status == "firing" and payload.numFiring -%}
Firing alerts – {{ payload.numFiring }}
Resolved alerts – {{ payload.numResolved }}
{% endif %}
{% endif -%}

{% if "runbook_url" in annotations -%}
[:book: Runbook:link:]({{ annotations.runbook_url }})
{%- set _ = annotations.pop('runbook_url') -%}
{%- endif %}
{% set _ = annotations.pop('runbook_url') -%}
{% endif -%}

{%- if "runbook_url_internal" in annotations -%}
{% if "runbook_url_internal" in annotations -%}
[:closed_book: Runbook (internal):link:]({{ annotations.runbook_url_internal }})
{%- set _ = annotations.pop('runbook_url_internal') -%}
{%- endif %}

GroupLabels:
{%- for k, v in payload["groupLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}

{% if payload["commonLabels"] | length > 0 -%}
CommonLabels:
{%- for k, v in payload["commonLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}
{% set _ = annotations.pop('runbook_url_internal') -%}
{% endif %}

{%- if groupLabels | length > 0 %}
GroupLabels:
{% for k, v in groupLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if commonLabels | length > 0 -%}
CommonLabels:
{% for k, v in commonLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if annotations | length > 0 -%}
Annotations:
{%- for k, v in annotations.items() %}
{% for k, v in annotations.items() -%}
- {{ k }}: {{ v }}
{%- endfor %}
{% endif %}
{% endfor %}
{% endif -%}

{# backward compatibility with legacy alertmanager integration -#}
{% if legacyLabels | length > 0 -%}
Labels:
{% for k, v in legacyLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if legacyAnnotations | length > 0 -%}
Annotations:
{% for k, v in legacyAnnotations.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}
[View in AlertManager]({{ source_link }})
"""


# Slack templates
# Slack
slack_title = """\
{%- set groupLabels = payload.groupLabels.copy() -%}
{%- set alertname = groupLabels.pop('alertname') | default("") -%}
*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ web_title }}>* via {{ integration_name }}
{% if source_link %}
(*<{{ source_link }}|source>*)
@ -101,50 +124,71 @@ slack_title = """\
# """

slack_message = """\
{%- set annotations = payload.commonAnnotations.copy() -%}
{% set annotations = payload.get("commonAnnotations", {}).copy() -%}
{% set groupLabels = payload.get("groupLabels", {}) -%}
{% set commonLabels = payload.get("commonLabels", {}) -%}
{% set severity = groupLabels.severity -%}
{% set legacyLabels = payload.get("labels", {}) -%}
{% set legacyAnnotations = payload.get("annotations", {}) -%}

{% set severity = payload.groupLabels.severity -%}
{% if severity %}
{%- set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
{% if severity -%}
{% set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
Severity: {{ severity }} {{ severity_emoji }}
{% endif %}
{% endif -%}

{%- set status = payload.status | default("Unknown") %}
{%- set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") %}
{% set status = payload.get("status", "Unknown") -%}
{% set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") -%}
Status: {{ status }} {{ status_emoji }} (on the source)
{% if status == "firing" %}
{% if status == "firing" and payload.numFiring -%}
Firing alerts – {{ payload.numFiring }}
Resolved alerts – {{ payload.numResolved }}
{% endif %}
{% endif -%}

{% if "runbook_url" in annotations -%}
<{{ annotations.runbook_url }}|:book: Runbook:link:>
{%- set _ = annotations.pop('runbook_url') -%}
{%- endif %}
{% set _ = annotations.pop('runbook_url') -%}
{% endif -%}

{%- if "runbook_url_internal" in annotations -%}
{% if "runbook_url_internal" in annotations -%}
<{{ annotations.runbook_url_internal }}|:closed_book: Runbook (internal):link:>
{%- set _ = annotations.pop('runbook_url_internal') -%}
{%- endif %}

GroupLabels:
{%- for k, v in payload["groupLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}

{% if payload["commonLabels"] | length > 0 -%}
CommonLabels:
{%- for k, v in payload["commonLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}
{% set _ = annotations.pop('runbook_url_internal') -%}
{% endif %}

{%- if groupLabels | length > 0 %}
GroupLabels:
{% for k, v in groupLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if commonLabels | length > 0 -%}
CommonLabels:
{% for k, v in commonLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if annotations | length > 0 -%}
Annotations:
{%- for k, v in annotations.items() %}
{% for k, v in annotations.items() -%}
- {{ k }}: {{ v }}
{%- endfor %}
{% endif %}
{% endfor %}
{% endif -%}

{# backward compatibility with legacy alertmanager integration -#}
{% if legacyLabels | length > 0 -%}
Labels:
{% for k, v in legacyLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if legacyAnnotations | length > 0 -%}
Annotations:
{% for k, v in legacyAnnotations.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}
"""
# noqa: W291
@ -153,59 +197,81 @@ slack_image_url = None

web_image_url = None

# SMS
sms_title = web_title

# Phone
phone_call_title = """{{ payload.get("groupLabels", {}).values() |join(", ") }}"""

phone_call_title = """{{ payload.groupLabels|join(", ") }}"""

# Telegram
telegram_title = web_title

telegram_message = """\
{%- set annotations = payload.commonAnnotations.copy() -%}
{% set annotations = payload.get("commonAnnotations", {}).copy() -%}
{% set groupLabels = payload.get("groupLabels", {}) -%}
{% set commonLabels = payload.get("commonLabels", {}) -%}
{% set severity = groupLabels.severity -%}
{% set legacyLabels = payload.get("labels", {}) -%}
{% set legacyAnnotations = payload.get("annotations", {}) -%}

{% set severity = payload.groupLabels.severity -%}
{% if severity %}
{%- set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
{% if severity -%}
{% set severity_emoji = {"critical": ":rotating_light:", "warning": ":warning:" }[severity] | default(":question:") -%}
Severity: {{ severity }} {{ severity_emoji }}
{% endif %}
{% endif -%}

{%- set status = payload.status | default("Unknown") %}
{%- set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") %}
{% set status = payload.get("status", "Unknown") -%}
{% set status_emoji = {"firing": ":fire:", "resolved": ":white_check_mark:"}[status] | default(":warning:") -%}
Status: {{ status }} {{ status_emoji }} (on the source)
{% if status == "firing" %}
{% if status == "firing" and payload.numFiring -%}
Firing alerts – {{ payload.numFiring }}
Resolved alerts – {{ payload.numResolved }}
{% endif %}
{% endif -%}

{% if "runbook_url" in annotations -%}
<a href='{{ annotations.runbook_url }}'>:book: Runbook:link:</a>
{%- set _ = annotations.pop('runbook_url') -%}
{%- endif %}
{% set _ = annotations.pop('runbook_url') -%}
{% endif -%}

{%- if "runbook_url_internal" in annotations -%}
{% if "runbook_url_internal" in annotations -%}
<a href='{{ annotations.runbook_url_internal }}'>:closed_book: Runbook (internal):link:</a>
{%- set _ = annotations.pop('runbook_url_internal') -%}
{%- endif %}

GroupLabels:
{%- for k, v in payload["groupLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}

{% if payload["commonLabels"] | length > 0 -%}
CommonLabels:
{%- for k, v in payload["commonLabels"].items() %}
- {{ k }}: {{ v }}
{%- endfor %}
{% set _ = annotations.pop('runbook_url_internal') -%}
{% endif %}

{%- if groupLabels | length > 0 %}
GroupLabels:
{% for k, v in groupLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if commonLabels | length > 0 -%}
CommonLabels:
{% for k, v in commonLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if annotations | length > 0 -%}
Annotations:
{%- for k, v in annotations.items() %}
{% for k, v in annotations.items() -%}
- {{ k }}: {{ v }}
{%- endfor %}
{% endif %}
{% endfor %}
{% endif -%}

{# backward compatibility with legacy alertmanager integration -#}
{% if legacyLabels | length > 0 -%}
Labels:
{% for k, v in legacyLabels.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}

{% if legacyAnnotations | length > 0 -%}
Annotations:
{% for k, v in legacyAnnotations.items() -%}
- {{ k }}: {{ v }}
{% endfor %}
{% endif -%}
<a href='{{ source_link }}'>View in AlertManager</a>
"""
9
engine/engine/included_path.py
Normal file
@ -0,0 +1,9 @@
from django.conf import settings


def custom_preprocessing_hook(endpoints):
    filtered = []
    for path, path_regex, method, callback in endpoints:
        if any(path_prefix in path for path_prefix in settings.SPECTACULAR_INCLUDED_PATHS):
            filtered.append((path, path_regex, method, callback))
    return filtered
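drf-spectacular passes each preprocessing hook the list of discovered `(path, path_regex, method, callback)` tuples and uses whatever the hook returns, so the hook above keeps only endpoints whose path contains one of `SPECTACULAR_INCLUDED_PATHS`. A small standalone check of that behaviour, with fake endpoints and the settings lookup inlined:

```python
SPECTACULAR_INCLUDED_PATHS = ["/features", "/alertgroups"]

# Made-up endpoint tuples in the shape drf-spectacular hands to preprocessing hooks.
endpoints = [
    ("/api/internal/v1/features/", r"^api/internal/v1/features/$", "GET", None),
    ("/api/internal/v1/alertgroups/", r"^api/internal/v1/alertgroups/$", "GET", None),
    ("/api/internal/v1/schedules/", r"^api/internal/v1/schedules/$", "GET", None),
]

# Same filter as custom_preprocessing_hook, with settings.SPECTACULAR_INCLUDED_PATHS inlined.
filtered = [
    endpoint
    for endpoint in endpoints
    if any(path_prefix in endpoint[0] for path_prefix in SPECTACULAR_INCLUDED_PATHS)
]
assert [path for path, *_ in filtered] == [
    "/api/internal/v1/features/",
    "/api/internal/v1/alertgroups/",
]
```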
@ -73,3 +73,11 @@ if settings.DEBUG:

if settings.SILK_PROFILER_ENABLED:
    urlpatterns += [path(settings.SILK_PATH, include("silk.urls", namespace="silk"))]

if settings.DRF_SPECTACULAR_ENABLED:
    from drf_spectacular.views import SpectacularAPIView, SpectacularSwaggerView

    urlpatterns += [
        path("internal/schema/", SpectacularAPIView.as_view(api_version="internal/v1"), name="schema"),
        path("internal/schema/swagger-ui/", SpectacularSwaggerView.as_view(url_name="schema"), name="swagger-ui"),
    ]
@ -4,8 +4,6 @@ slackclient==1.3.0
whitenoise==5.3.0
twilio~=6.37.0
phonenumbers==8.10.0
# TODO: remove django-ordered-model after migration to custom OrderModel
django-ordered-model==3.1.1
celery[amqp,redis]==5.3.1
redis==4.6.0
humanize==0.5.1
@ -59,3 +57,4 @@ urllib3==1.26.15
prometheus_client==0.16.0
lxml==4.9.2
babel==2.12.1
drf-spectacular==0.26.2
@ -209,7 +209,6 @@ INSTALLED_APPS = [
    "django.contrib.staticfiles",
    "rest_framework",
    "django_filters",
    "ordered_model",
    "mirage",
    "engine",
    "apps.user_management",
@ -238,6 +237,7 @@ INSTALLED_APPS = [
    "fcm_django",
    "django_dbconn_retry",
    "apps.phone_notifications",
    "drf_spectacular",
]

REST_FRAMEWORK = {
@ -247,8 +247,30 @@ REST_FRAMEWORK = {
        "rest_framework.parsers.MultiPartParser",
    ),
    "DEFAULT_AUTHENTICATION_CLASSES": [],
    "DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
}


DRF_SPECTACULAR_ENABLED = getenv_boolean("DRF_SPECTACULAR_ENABLED", default=False)

SPECTACULAR_SETTINGS = {
    "TITLE": "Grafana OnCall Private API",
    "DESCRIPTION": "Internal API docs. This is not meant to be used by end users. API endpoints will be kept added/removed/changed without notice.",
    "VERSION": "1.0.0",
    "SERVE_INCLUDE_SCHEMA": False,
    # OTHER SETTINGS
    "PREPROCESSING_HOOKS": [
        "engine.included_path.custom_preprocessing_hook"
    ],  # Custom hook to include only paths from SPECTACULAR_INCLUDED_PATHS
    "SERVE_URLCONF": ("apps.api.urls"),
    "SWAGGER_UI_SETTINGS": {"supportedSubmitMethods": []},  # Disable "Try it out" button for all endpoints
}

SPECTACULAR_INCLUDED_PATHS = [
    "/features",
    "/alertgroups",
]

MIDDLEWARE = [
    "log_request_id.middleware.RequestIDMiddleware",
    "engine.middlewares.RequestTimeLoggingMiddleware",
@ -481,6 +503,10 @@ CELERY_BEAT_SCHEDULE = {
    },
    "notify_shift_swap_requests": {
        "task": "apps.mobile_app.tasks.notify_shift_swap_requests",
        "schedule": getenv_integer("NOTIFY_SHIFT_SWAP_REQUESTS_INTERVAL", default=10 * 60),
    },
    "send_shift_swap_request_slack_followups": {
        "task": "apps.schedules.tasks.shift_swaps.slack_followups.send_shift_swap_request_slack_followups",
        "schedule": 10 * 60,
    },
    "save_organizations_ids_in_cache": {
@ -77,8 +77,10 @@ CELERY_TASK_ROUTES = {
    "apps.schedules.tasks.notify_about_empty_shifts_in_schedule.start_notify_about_empty_shifts_in_schedule": {
        "queue": "default"
    },
    "apps.schedules.tasks.shift_swaps.slack_messages.post_shift_swap_request_creation_message": {"queue": "default"},
    "apps.schedules.tasks.shift_swaps.slack_messages.create_shift_swap_request_message": {"queue": "default"},
    "apps.schedules.tasks.shift_swaps.slack_messages.update_shift_swap_request_message": {"queue": "default"},
    "apps.schedules.tasks.shift_swaps.slack_followups.send_shift_swap_request_slack_followups": {"queue": "default"},
    "apps.schedules.tasks.shift_swaps.slack_followups.send_shift_swap_request_slack_followup": {"queue": "default"},
    # CRITICAL
    "apps.alerts.tasks.acknowledge_reminder.acknowledge_reminder_task": {"queue": "critical"},
    "apps.alerts.tasks.acknowledge_reminder.unacknowledge_timeout_task": {"queue": "critical"},