commit 56eefe14ba
55 changed files with 876 additions and 263 deletions

.github/workflows/e2e-tests.yml
@@ -179,7 +179,7 @@ jobs:
      - name: Upload artifact
        if: failure()
-        uses: actions/upload-pages-artifact@v1
+        uses: actions/upload-pages-artifact@v3
        with:
          path: ./grafana-plugin/playwright-report/
@@ -40,22 +40,22 @@ The above command returns JSON structured in the following way:
 }
 ```

-| Parameter | Required | Description |
-| ---------------------------------- |:----------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `escalation_chain_id` | Yes | Each escalation policy is assigned to a specific escalation chain. |
-| `position` | Optional | Escalation policies execute one after another starting from `position=0`. `Position=-1` will put the escalation policy to the end of the list. A new escalation policy created with a position of an existing escalation policy will move the old one (and all following) down in the list. |
-| `type` | Yes | One of: `wait`, `notify_persons`, `notify_person_next_each_time`, `notify_on_call_from_schedule`, `notify_user_group`, `trigger_webhook`, `resolve`, `notify_whole_channel`, `notify_if_time_from_to`, `declare_incident`. |
-| `important` | Optional | Default is `false`. Will assign "important" to personal notification rules if `true`. This can be used to distinguish alerts on which you want to be notified immediately by phone. Applicable for types `notify_persons`, `notify_team_members`, `notify_on_call_from_schedule`, and `notify_user_group`. |
-| `duration` | If type = `wait` | The duration, in seconds, when type `wait` is chosen. Valid values are any number of seconds in the inclusive range `60 to 86400`. |
-| `action_to_trigger` | If type = `trigger_webhook` | ID of a webhook. |
-| `group_to_notify` | If type = `notify_user_group` | ID of a `User Group`. |
-| `persons_to_notify` | If type = `notify_persons` | List of user IDs. |
-| `persons_to_notify_next_each_time` | If type = `notify_person_next_each_time` | List of user IDs. |
-| `notify_on_call _from_schedule` | If type = `notify_on_call_from_schedule` | ID of a Schedule. |
-| `notify_if_time_from` | If type = `notify_if_time_from_to` | UTC time represents the beginning of the time period, for example `09:00:00Z`. |
-| `notify_if_time_to` | If type = `notify_if_time_from_to` | UTC time represents the end of the time period, for example `18:00:00Z`. |
-| `team_to_notify` | If type = `notify_team_members` | ID of a team. |
-| `severity` | If type = `declare_incident` | Severity of the incident. |
+| Parameter | Required | Description |
+| ---------------------------------- |:----------------------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `escalation_chain_id` | Yes | Each escalation policy is assigned to a specific escalation chain. |
+| `position` | Optional | Escalation policies execute one after another starting from `position=0`. `Position=-1` will put the escalation policy to the end of the list. A new escalation policy created with a position of an existing escalation policy will move the old one (and all following) down in the list. |
+| `type` | Yes | One of: `wait`, `notify_persons`, `notify_person_next_each_time`, `notify_on_call_from_schedule`, `notify_user_group`, `trigger_webhook`, `resolve`, `notify_whole_channel`, `notify_if_time_from_to`, `declare_incident`. |
+| `important` | Optional | Default is `false`. Will assign "important" to personal notification rules if `true`. This can be used to distinguish alerts on which you want to be notified immediately by phone. Applicable for types `notify_persons`, `notify_person_next_each_time`, `notify_team_members`, `notify_on_call_from_schedule`, and `notify_user_group`. |
+| `duration` | If type = `wait` | The duration, in seconds, when type `wait` is chosen. Valid values are any number of seconds in the inclusive range `60 to 86400`. |
+| `action_to_trigger` | If type = `trigger_webhook` | ID of a webhook. |
+| `group_to_notify` | If type = `notify_user_group` | ID of a `User Group`. |
+| `persons_to_notify` | If type = `notify_persons` | List of user IDs. |
+| `persons_to_notify_next_each_time` | If type = `notify_person_next_each_time` | List of user IDs. |
+| `notify_on_call _from_schedule` | If type = `notify_on_call_from_schedule` | ID of a Schedule. |
+| `notify_if_time_from` | If type = `notify_if_time_from_to` | UTC time represents the beginning of the time period, for example `09:00:00Z`. |
+| `notify_if_time_to` | If type = `notify_if_time_from_to` | UTC time represents the end of the time period, for example `18:00:00Z`. |
+| `team_to_notify` | If type = `notify_team_members` | ID of a team. |
+| `severity` | If type = `declare_incident` | Severity of the incident. |

 **HTTP request**
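The table above documents the public escalation policy API. As a rough usage sketch, assuming an OnCall API token; the base URL, token, and chain ID below are placeholders:

```python
# Hedged sketch: create a "wait" escalation policy via the public API.
import requests

BASE_URL = "https://oncall.example.com/api/v1"  # assumed instance URL
headers = {"Authorization": "<your-oncall-api-token>"}

payload = {
    "escalation_chain_id": "F5JU6KJET33FE",  # hypothetical chain ID
    "type": "wait",
    "position": 0,      # execute first; -1 would append to the end of the list
    "duration": 300,    # seconds; must lie in the inclusive range 60..86400
}

response = requests.post(f"{BASE_URL}/escalation_policies/", json=payload, headers=headers)
response.raise_for_status()
print(response.json())
```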
@@ -25,3 +25,7 @@ class AlertGroupState(str, Enum):
    ACKNOWLEDGED = "acknowledged"
    RESOLVED = "resolved"
    SILENCED = "silenced"
+
+
+SERVICE_LABEL = "service_name"
+SERVICE_LABEL_TEMPLATE_FOR_ALERTING_INTEGRATION = "{{ payload.commonLabels.service_name }}"
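The new constant is a Jinja template evaluated against the alert payload. A small sketch of how it resolves; the payload shape here is inferred from the template itself, not from OnCall:

```python
# Render the new service_name label template against an Alertmanager-style payload.
from jinja2 import Template

SERVICE_LABEL_TEMPLATE = "{{ payload.commonLabels.service_name }}"
payload = {"commonLabels": {"service_name": "checkout-service"}}  # assumed shape

rendered = Template(SERVICE_LABEL_TEMPLATE).render(payload=payload)
print(rendered)  # checkout-service
```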
@@ -137,6 +137,7 @@ class EscalationPolicySnapshot:
            EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT: self._escalation_step_notify_on_call_schedule,
            EscalationPolicy.STEP_TRIGGER_CUSTOM_WEBHOOK: self._escalation_step_trigger_custom_webhook,
            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE: self._escalation_step_notify_users_queue,
+            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT: self._escalation_step_notify_users_queue,
            EscalationPolicy.STEP_NOTIFY_IF_TIME: self._escalation_step_notify_if_time,
            EscalationPolicy.STEP_NOTIFY_IF_NUM_ALERTS_IN_TIME_WINDOW: self._escalation_step_notify_if_num_alerts_in_time_window,
            EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS: self._escalation_step_notify_multiple_users,

@@ -199,6 +200,7 @@ class EscalationPolicySnapshot:
            ),
            {
                "reason": reason,
+                "important": self.step == EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
            },
            immutable=True,
        )
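Both step constants deliberately point at the same handler, and the handler derives the importance flag from the step itself. A self-contained sketch of that dispatch pattern; constants and names are illustrative stand-ins, not OnCall's actual API:

```python
STEP_NOTIFY_USERS_QUEUE = 11           # stand-in values
STEP_NOTIFY_USERS_QUEUE_IMPORTANT = 20

class SnapshotSketch:
    def __init__(self, step: int) -> None:
        self.step = step
        # Default and important variants share one handler.
        self._handlers = {
            STEP_NOTIFY_USERS_QUEUE: self._notify_users_queue,
            STEP_NOTIFY_USERS_QUEUE_IMPORTANT: self._notify_users_queue,
        }

    def execute(self) -> dict:
        return self._handlers[self.step]()

    def _notify_users_queue(self) -> dict:
        # The handler, not the dispatch table, decides on importance.
        return {"important": self.step == STEP_NOTIFY_USERS_QUEUE_IMPORTANT}

assert SnapshotSketch(STEP_NOTIFY_USERS_QUEUE_IMPORTANT).execute() == {"important": True}
```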
@@ -100,9 +100,10 @@ class IncidentLogBuilder:
        ]
        excluded_escalation_steps = [EscalationPolicy.STEP_WAIT, EscalationPolicy.STEP_FINAL_RESOLVE]
        not_excluded_steps_with_author = [
            EscalationPolicy.STEP_NOTIFY,
            EscalationPolicy.STEP_NOTIFY_IMPORTANT,
            EscalationPolicy._DEPRECATED_STEP_NOTIFY,
            EscalationPolicy._DEPRECATED_STEP_NOTIFY_IMPORTANT,
            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
        ]

        # exclude logs that we don't want to see in after resolve report

@@ -466,6 +467,7 @@ class IncidentLogBuilder:
            EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
            EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
        ]:
            users_to_notify: UsersToNotify = escalation_policy_snapshot.sorted_users_queue

@@ -473,7 +475,10 @@ class IncidentLogBuilder:
            if users_to_notify:
                plan_line = f'escalation step "{escalation_policy_snapshot.step_display}"'

-                if escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_USERS_QUEUE:
+                if escalation_policy_snapshot.step in (
+                    EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+                    EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
+                ):
                    try:
                        last_user_index = users_to_notify.index(escalation_policy_snapshot.last_notified_user)
                    except ValueError:

@@ -489,14 +494,21 @@ class IncidentLogBuilder:

            escalation_plan.setdefault(timedelta, []).append({"plan_lines": [plan_line]})

-        elif escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_USERS_QUEUE:
+        elif escalation_policy_snapshot.step in (
+            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
+        ):
            last_notified_user = escalation_policy_snapshot.last_notified_user
            users_to_notify = [last_notified_user] if last_notified_user else []

        for user_to_notify in users_to_notify:
            notification_plan = self._get_notification_plan_for_user(
                user_to_notify,
-                important=escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+                important=escalation_policy_snapshot.step
+                in [
+                    EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
+                    EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
+                ],
                for_slack=for_slack,
                future_step=future_step,
            )

@@ -524,7 +536,7 @@ class IncidentLogBuilder:
            )
        else:
            plan_line = (
-                f'escalation step "{escalation_policy_snapshot.step_display}" is slack specific. ' f"Skipping"
+                f'escalation step "{escalation_policy_snapshot.step_display}" is Slack specific. ' f"Skipping"
            )

            escalation_plan.setdefault(timedelta, []).append({"plan_lines": [plan_line]})

@@ -534,7 +546,6 @@ class IncidentLogBuilder:
        for user_to_notify in final_notify_all_users_to_notify:
            notification_plan = self._get_notification_plan_for_user(
                user_to_notify,
-                important=escalation_policy_snapshot.step == EscalationPolicy.STEP_NOTIFY_IMPORTANT,
                for_slack=for_slack,
                future_step=future_step,
            )

@@ -586,7 +597,7 @@ class IncidentLogBuilder:
            )
        else:
            plan_line = (
-                f'escalation step "{escalation_policy_snapshot.step_display}" is slack specific. Skipping'
+                f'escalation step "{escalation_policy_snapshot.step_display}" is Slack specific. Skipping'
            )

            escalation_plan.setdefault(timedelta, []).append({"plan_lines": [plan_line]})
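The `users_to_notify.index(escalation_policy_snapshot.last_notified_user)` lookup above drives a round-robin rotation. Reduced to its core as a pure-Python sketch (not the builder's actual signature):

```python
# Rotate the queue so that the user after the last-notified one comes first.
def next_in_queue(users: list[str], last_notified: str) -> list[str]:
    try:
        i = users.index(last_notified)
    except ValueError:
        return list(users)  # last-notified user left the queue; start from the top
    return users[i + 1:] + users[:i + 1]

assert next_in_queue(["a", "b", "c"], "a") == ["b", "c", "a"]
assert next_in_queue(["a", "b", "c"], "z") == ["a", "b", "c"]
```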
@@ -0,0 +1,18 @@
+# Generated by Django 4.2.15 on 2025-01-20 10:33
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('alerts', '0073_update_direct_paging_integration_non_default_routes'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='escalationpolicy',
+            name='step',
+            field=models.IntegerField(choices=[(0, 'Wait'), (1, 'Notify User'), (2, 'Notify Whole Channel'), (3, 'Repeat Escalation (5 times max)'), (4, 'Resolve'), (5, 'Notify Group'), (6, 'Notify Schedule'), (7, 'Notify User (Important)'), (8, 'Notify Group (Important)'), (9, 'Notify Schedule (Important)'), (10, 'Trigger Outgoing Webhook'), (11, 'Notify User (next each time)'), (12, 'Continue escalation only if time is from'), (13, 'Notify multiple Users'), (14, 'Notify multiple Users (Important)'), (15, 'Continue escalation if >X alerts per Y minutes'), (16, 'Trigger Webhook'), (17, 'Notify all users in a Team'), (18, 'Notify all users in a Team (Important)'), (19, 'Declare Incident'), (20, 'Notify User (next each time) (Important)')], default=None, null=True),
+        ),
+    ]
@@ -14,7 +14,7 @@ from apps.alerts.constants import TASK_DELAY_SECONDS
from apps.alerts.incident_appearance.templaters import TemplateLoader
from apps.alerts.signals import alert_group_escalation_snapshot_built
from apps.alerts.tasks.distribute_alert import send_alert_create_signal
-from apps.labels.alert_group_labels import assign_labels, gather_labels_from_alert_receive_channel_and_raw_request_data
+from apps.labels.alert_group_labels import gather_alert_labels, save_alert_group_labels
from apps.labels.types import AlertLabels
from common.jinja_templater import apply_jinja_template_to_alert_payload_and_labels
from common.jinja_templater.apply_jinja_template import (

@@ -106,13 +106,11 @@ class Alert(models.Model):
        # This import is here to avoid circular imports
        from apps.alerts.models import AlertGroup, AlertGroupLogRecord, AlertReceiveChannel, ChannelFilter

-        parsed_labels = gather_labels_from_alert_receive_channel_and_raw_request_data(
-            alert_receive_channel, raw_request_data
-        )
-        group_data = Alert.render_group_data(alert_receive_channel, raw_request_data, parsed_labels, is_demo)
+        alert_labels = gather_alert_labels(alert_receive_channel, raw_request_data)
+        group_data = Alert.render_group_data(alert_receive_channel, raw_request_data, alert_labels, is_demo)

        if channel_filter is None:
-            channel_filter = ChannelFilter.select_filter(alert_receive_channel, raw_request_data, parsed_labels)
+            channel_filter = ChannelFilter.select_filter(alert_receive_channel, raw_request_data, alert_labels)

        # Get or create group
        group, group_created = AlertGroup.objects.get_or_create_grouping(

@@ -141,7 +139,7 @@ class Alert(models.Model):
        transaction.on_commit(partial(send_alert_create_signal.apply_async, (alert.pk,)))

        if group_created:
-            assign_labels(group, alert_receive_channel, parsed_labels)
+            save_alert_group_labels(group, alert_receive_channel, alert_labels)
            group.log_records.create(type=AlertGroupLogRecord.TYPE_REGISTERED)
            group.log_records.create(type=AlertGroupLogRecord.TYPE_ROUTE_ASSIGNED)
@@ -14,6 +14,7 @@ from django.utils import timezone
from django.utils.crypto import get_random_string
from emoji import emojize

+from apps.alerts.constants import SERVICE_LABEL, SERVICE_LABEL_TEMPLATE_FOR_ALERTING_INTEGRATION
from apps.alerts.grafana_alerting_sync_manager.grafana_alerting_sync import GrafanaAlertingSyncManager
from apps.alerts.integration_options_mixin import IntegrationOptionsMixin
from apps.alerts.models.maintainable_object import MaintainableObject

@@ -24,6 +25,7 @@ from apps.grafana_plugin.ui_url_builder import UIURLBuilder
from apps.integrations.legacy_prefix import remove_legacy_prefix
from apps.integrations.metadata import heartbeat
from apps.integrations.tasks import create_alert, create_alertmanager_alerts
+from apps.labels.tasks import add_service_label_for_integration
from apps.metrics_exporter.helpers import (
    metrics_add_integrations_to_cache,
    metrics_remove_deleted_integration_from_cache,

@@ -48,6 +50,10 @@ if typing.TYPE_CHECKING:
logger = logging.getLogger(__name__)


+class CreatingServiceNameDynamicLabelFailed(Exception):
+    """Raised when failed to create a dynamic service name label"""
+
+
class MessagingBackendTemplatesItem:
    title: str | None
    message: str | None

@@ -790,6 +796,54 @@ class AlertReceiveChannel(IntegrationOptionsMixin, MaintainableObject):
            result["team"] = "General"
        return result

+    def create_service_name_dynamic_label(self, is_called_async: bool = False):
+        """
+        create_service_name_dynamic_label creates a dynamic label for service_name for Grafana Alerting integration.
+        Warning: It might make a request to the labels repo API.
+        That's why it's called in api handlers, not in post_save.
+        Once we will have labels operator & get rid of syncing labels from repo, this method should be moved
+        to post_save.
+        """
+        from apps.labels.models import LabelKeyCache
+
+        if not self.organization.is_grafana_labels_enabled:
+            return
+        if self.integration != AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING:
+            return
+
+        # validate that service_name label doesn't exist in already
+        service_name_label = LabelKeyCache.objects.filter(organization=self.organization, name=SERVICE_LABEL).first()
+
+        if service_name_label is not None and self.alert_group_labels_custom is not None:
+            for k, _, _ in self.alert_group_labels_custom:
+                if k == service_name_label.id:
+                    return
+
+        service_name_dynamic_label = self._build_service_name_label_custom(self.organization)
+        if service_name_dynamic_label is None:
+            # if this method was called from a celery task, raise exception to retry it
+            if is_called_async:
+                raise CreatingServiceNameDynamicLabelFailed
+            # otherwise start a celery task to retry the label creation async
+            add_service_label_for_integration.apply_async((self.id,))
+            return
+        self.alert_group_labels_custom = [service_name_dynamic_label] + (self.alert_group_labels_custom or [])
+        self.save(update_fields=["alert_group_labels_custom"])
+
+    @staticmethod
+    def _build_service_name_label_custom(organization: "Organization") -> DynamicLabelsEntryDB | None:
+        """
+        _build_service_name_label_custom returns `service_name` label template in dynamic label format:
+        [key_id, None, template].
+        If there is no label key service_name in the cache - it tries to fetch it from the labels repo API.
+        """
+        from apps.labels.models import LabelKeyCache
+
+        service_label_key = LabelKeyCache.get_or_create_by_name(organization, SERVICE_LABEL)
+        return (
+            [service_label_key.id, None, SERVICE_LABEL_TEMPLATE_FOR_ALERTING_INTEGRATION] if service_label_key else None
+        )
+

@receiver(post_save, sender=AlertReceiveChannel)
def listen_for_alertreceivechannel_model_save(
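The sync-or-async shape of `create_service_name_dynamic_label` is a common Celery pattern: a synchronous caller defers a failed attempt to a background task, and the task raises so the worker retries it. A minimal sketch under that assumption, with all names hypothetical:

```python
from celery import shared_task

class LabelCreationFailed(Exception):
    """Raised inside the task so the queue retries label creation."""

def fetch_label_key(integration_id):
    # Stand-in for the labels-repo API call; may return None on failure.
    return None

def create_label(integration_id, is_called_async=False):
    key = fetch_label_key(integration_id)
    if key is None:
        if is_called_async:
            raise LabelCreationFailed  # let the worker retry
        add_label_async.apply_async((integration_id,))  # defer to background
        return
    # ... attach the label to the integration ...

@shared_task(bind=True, max_retries=5)
def add_label_async(self, integration_id):
    try:
        create_label(integration_id, is_called_async=True)
    except LabelCreationFailed as exc:
        raise self.retry(exc=exc, countdown=60)
```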
@@ -29,13 +29,13 @@ class EscalationPolicy(OrderedModel):

    (
        STEP_WAIT,
        STEP_NOTIFY,
        _DEPRECATED_STEP_NOTIFY,  # only here to keep range intact
        STEP_FINAL_NOTIFYALL,
        STEP_REPEAT_ESCALATION_N_TIMES,
        STEP_FINAL_RESOLVE,
        STEP_NOTIFY_GROUP,
        STEP_NOTIFY_SCHEDULE,
        STEP_NOTIFY_IMPORTANT,
        _DEPRECATED_STEP_NOTIFY_IMPORTANT,  # only here to keep range intact
        STEP_NOTIFY_GROUP_IMPORTANT,
        STEP_NOTIFY_SCHEDULE_IMPORTANT,
        _DEPRECATED_STEP_TRIGGER_CUSTOM_BUTTON,  # only here to keep range intact

@@ -48,18 +48,19 @@ class EscalationPolicy(OrderedModel):
        STEP_NOTIFY_TEAM_MEMBERS,
        STEP_NOTIFY_TEAM_MEMBERS_IMPORTANT,
        STEP_DECLARE_INCIDENT,
-    ) = range(20)
+        STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
+    ) = range(21)

    # Must be the same order as previous
    STEP_CHOICES = (
        (STEP_WAIT, "Wait"),
        (STEP_NOTIFY, "Notify User"),
        (_DEPRECATED_STEP_NOTIFY, "Notify User"),
        (STEP_FINAL_NOTIFYALL, "Notify Whole Channel"),
        (STEP_REPEAT_ESCALATION_N_TIMES, "Repeat Escalation (5 times max)"),
        (STEP_FINAL_RESOLVE, "Resolve"),
        (STEP_NOTIFY_GROUP, "Notify Group"),
        (STEP_NOTIFY_SCHEDULE, "Notify Schedule"),
        (STEP_NOTIFY_IMPORTANT, "Notify User (Important)"),
        (_DEPRECATED_STEP_NOTIFY_IMPORTANT, "Notify User (Important)"),
        (STEP_NOTIFY_GROUP_IMPORTANT, "Notify Group (Important)"),
        (STEP_NOTIFY_SCHEDULE_IMPORTANT, "Notify Schedule (Important)"),
        (_DEPRECATED_STEP_TRIGGER_CUSTOM_BUTTON, "Trigger Outgoing Webhook"),

@@ -72,6 +73,7 @@ class EscalationPolicy(OrderedModel):
        (STEP_NOTIFY_TEAM_MEMBERS, "Notify all users in a Team"),
        (STEP_NOTIFY_TEAM_MEMBERS_IMPORTANT, "Notify all users in a Team (Important)"),
        (STEP_DECLARE_INCIDENT, "Declare Incident"),
+        (STEP_NOTIFY_USERS_QUEUE_IMPORTANT, "Notify User (next each time) (Important)"),
    )

    # Ordered step choices available for internal api.

@@ -114,6 +116,7 @@ class EscalationPolicy(OrderedModel):
        STEP_TRIGGER_CUSTOM_WEBHOOK,
        STEP_REPEAT_ESCALATION_N_TIMES,
        STEP_DECLARE_INCIDENT,
+        STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
    ]

    # Maps internal api's steps choices to their verbal. First string in tuple is display name for existent step.

@@ -142,7 +145,10 @@ class EscalationPolicy(OrderedModel):
        ),
        # Other
        STEP_TRIGGER_CUSTOM_WEBHOOK: ("Trigger webhook {{custom_webhook}}", "Trigger webhook"),
-        STEP_NOTIFY_USERS_QUEUE: ("Round robin notification for {{users}}", "Notify users one by one (round-robin)"),
+        STEP_NOTIFY_USERS_QUEUE: (
+            "Round robin {{importance}} notification for {{users}}",
+            "Notify users one by one (round-robin)",
+        ),
        STEP_NOTIFY_IF_TIME: (
            "Continue escalation if current UTC time is in {{timerange}}",
            "Continue escalation if current UTC time is in range",

@@ -166,7 +172,6 @@ class EscalationPolicy(OrderedModel):
        STEP_FINAL_NOTIFYALL,
        STEP_FINAL_RESOLVE,
        STEP_TRIGGER_CUSTOM_WEBHOOK,
-        STEP_NOTIFY_USERS_QUEUE,
        STEP_NOTIFY_IF_TIME,
        STEP_REPEAT_ESCALATION_N_TIMES,
        STEP_DECLARE_INCIDENT,

@@ -177,12 +182,14 @@ class EscalationPolicy(OrderedModel):
        STEP_NOTIFY_SCHEDULE: STEP_NOTIFY_SCHEDULE_IMPORTANT,
        STEP_NOTIFY_MULTIPLE_USERS: STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
        STEP_NOTIFY_TEAM_MEMBERS: STEP_NOTIFY_TEAM_MEMBERS_IMPORTANT,
+        STEP_NOTIFY_USERS_QUEUE: STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
    }
    IMPORTANT_TO_DEFAULT_STEP_MAPPING = {
        STEP_NOTIFY_GROUP_IMPORTANT: STEP_NOTIFY_GROUP,
        STEP_NOTIFY_SCHEDULE_IMPORTANT: STEP_NOTIFY_SCHEDULE,
        STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT: STEP_NOTIFY_MULTIPLE_USERS,
        STEP_NOTIFY_TEAM_MEMBERS_IMPORTANT: STEP_NOTIFY_TEAM_MEMBERS,
+        STEP_NOTIFY_USERS_QUEUE_IMPORTANT: STEP_NOTIFY_USERS_QUEUE,
    }

    # Default steps are just usual version of important steps. E.g. notify group - notify group important

@@ -191,6 +198,7 @@ class EscalationPolicy(OrderedModel):
        STEP_NOTIFY_SCHEDULE,
        STEP_NOTIFY_MULTIPLE_USERS,
        STEP_NOTIFY_TEAM_MEMBERS,
+        STEP_NOTIFY_USERS_QUEUE,
    }

    IMPORTANT_STEPS_SET = {

@@ -198,6 +206,7 @@ class EscalationPolicy(OrderedModel):
        STEP_NOTIFY_SCHEDULE_IMPORTANT,
        STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
        STEP_NOTIFY_TEAM_MEMBERS_IMPORTANT,
+        STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
    }

    SLACK_INTEGRATION_REQUIRED_STEPS = [

@@ -224,12 +233,10 @@ class EscalationPolicy(OrderedModel):

    PUBLIC_STEP_CHOICES_MAP = {
        STEP_WAIT: "wait",
        STEP_NOTIFY: "notify_one_person",
        STEP_FINAL_NOTIFYALL: "notify_whole_channel",
        STEP_FINAL_RESOLVE: "resolve",
        STEP_NOTIFY_GROUP: "notify_user_group",
        STEP_NOTIFY_GROUP_IMPORTANT: "notify_user_group",
        STEP_NOTIFY_IMPORTANT: "notify_one_person",
        STEP_NOTIFY_SCHEDULE: "notify_on_call_from_schedule",
        STEP_NOTIFY_SCHEDULE_IMPORTANT: "notify_on_call_from_schedule",
        STEP_TRIGGER_CUSTOM_WEBHOOK: "trigger_webhook",
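The default/important mappings above are symmetric by construction. A small self-contained sketch of how such a pair of maps supports toggling a step's importance; the constants are stand-ins, not OnCall's real values:

```python
STEP_NOTIFY_USERS_QUEUE = 11           # stand-in values
STEP_NOTIFY_USERS_QUEUE_IMPORTANT = 20

DEFAULT_TO_IMPORTANT_STEP_MAPPING = {STEP_NOTIFY_USERS_QUEUE: STEP_NOTIFY_USERS_QUEUE_IMPORTANT}
IMPORTANT_TO_DEFAULT_STEP_MAPPING = {v: k for k, v in DEFAULT_TO_IMPORTANT_STEP_MAPPING.items()}

def with_importance(step: int, important: bool) -> int:
    """Return the variant of `step` matching the requested importance."""
    if important:
        return DEFAULT_TO_IMPORTANT_STEP_MAPPING.get(step, step)
    return IMPORTANT_TO_DEFAULT_STEP_MAPPING.get(step, step)

assert with_importance(STEP_NOTIFY_USERS_QUEUE, important=True) == STEP_NOTIFY_USERS_QUEUE_IMPORTANT
assert with_importance(STEP_NOTIFY_USERS_QUEUE_IMPORTANT, important=False) == STEP_NOTIFY_USERS_QUEUE
```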
@@ -252,7 +252,7 @@ def notify_user_task(
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
        notification_policy=notification_policy,
        alert_group=alert_group,
-        reason="Alert group slack notifications are disabled",
+        reason="Alert group Slack notifications are disabled",
        slack_prevent_posting=prevent_posting_to_thread,
        notification_step=notification_policy.step,
        notification_channel=notification_policy.notify_by,
@@ -56,8 +56,8 @@ def test_alert_create_custom_channel_filter(make_organization, make_alert_receiv
    assert alert.group.channel_filter == other_channel_filter


-@patch("apps.alerts.models.alert.assign_labels")
-@patch("apps.alerts.models.alert.gather_labels_from_alert_receive_channel_and_raw_request_data")
+@patch("apps.alerts.models.alert.save_alert_group_labels")
+@patch("apps.alerts.models.alert.gather_alert_labels")
@patch("apps.alerts.models.ChannelFilter.select_filter", wraps=ChannelFilter.select_filter)
@pytest.mark.django_db
def test_alert_create_labels_are_assigned(
@@ -93,9 +93,13 @@ def test_escalation_step_notify_all(


@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None)
+@pytest.mark.parametrize(
+    "step", [EscalationPolicy.STEP_NOTIFY_USERS_QUEUE, EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT]
+)
@pytest.mark.django_db
def test_escalation_step_notify_users_queue(
    mocked_execute_tasks,
+    step,
    make_user_for_organization,
    escalation_step_test_setup,
    make_escalation_policy,

@@ -105,7 +109,7 @@ def test_escalation_step_notify_users_queue(

    notify_queue_step = make_escalation_policy(
        escalation_chain=channel_filter.escalation_chain,
-        escalation_policy_step=EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
+        escalation_policy_step=step,
    )
    notify_queue_step.notify_to_users_queue.set([user, user_2])
    escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_queue_step)
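The parametrization above is the usual pytest pattern for running one test body against both the default and the important variant. A standalone sketch of the same shape, with stand-in constants:

```python
import pytest

STEP_NOTIFY_USERS_QUEUE = 11           # stand-in values
STEP_NOTIFY_USERS_QUEUE_IMPORTANT = 20
IMPORTANT_STEPS = {STEP_NOTIFY_USERS_QUEUE_IMPORTANT}

@pytest.mark.parametrize("step", [STEP_NOTIFY_USERS_QUEUE, STEP_NOTIFY_USERS_QUEUE_IMPORTANT])
def test_queue_step_importance(step):
    # Both variants share behavior; only the importance flag differs.
    assert (step in IMPORTANT_STEPS) == (step == STEP_NOTIFY_USERS_QUEUE_IMPORTANT)
```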
@@ -13,7 +13,7 @@ from apps.alerts.grafana_alerting_sync_manager.grafana_alerting_sync import Graf
from apps.alerts.models import AlertReceiveChannel
from apps.base.messaging import get_messaging_backends
from apps.integrations.legacy_prefix import has_legacy_prefix
-from apps.labels.models import AlertReceiveChannelAssociatedLabel, LabelKeyCache, LabelValueCache
+from apps.labels.models import LabelKeyCache, LabelValueCache
from apps.labels.types import LabelKey
from apps.user_management.models import Organization
from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField

@@ -33,7 +33,7 @@ def _additional_settings_serializer_from_type(integration_type: str) -> serializ
    return cls


-# TODO: refactor this types as w no longer support storing static labels in this field.
+# TODO: refactor this types as we no longer support storing static labels in this field.
# AlertGroupCustomLabelValue represents custom alert group label value for API requests
# It handles two types of label's value:
# 1. Just Label Value from a label repo for a static label

@@ -79,7 +79,10 @@ class AdditionalSettingsField(serializers.DictField):


class CustomLabelSerializer(serializers.Serializer):
-    """This serializer is consistent with apps.api.serializers.labels.LabelPairSerializer, but allows null for value ID."""
+    """
+    This serializer is consistent with apps.api.serializers.labels.LabelPairSerializer,
+    but allows null for value ID to support templated labels.
+    """

    class CustomLabelKeySerializer(serializers.Serializer):
        id = serializers.CharField()

@@ -97,98 +100,12 @@ class CustomLabelSerializer(serializers.Serializer):


class IntegrationAlertGroupLabelsSerializer(serializers.Serializer):
    """Alert group labels configuration for the integration. See AlertReceiveChannel.alert_group_labels for details."""

    # todo: inheritable field is deprecated. Remove in a future release
    inheritable = serializers.DictField(child=serializers.BooleanField(), required=False)
    custom = CustomLabelSerializer(many=True)
    template = serializers.CharField(allow_null=True)

-    @staticmethod
-    def pop_alert_group_labels(validated_data: dict) -> IntegrationAlertGroupLabels | None:
-        """Get alert group labels from validated data."""
-
-        # the "alert_group_labels" field is optional, so either all 2 fields are present or none
-        # "inheritable" field is deprecated
-        if "custom" not in validated_data:
-            return None
-
-        return {
-            "inheritable": validated_data.pop("inheritable", None),  # deprecated
-            "custom": validated_data.pop("custom"),
-            "template": validated_data.pop("template"),
-        }
-
-    @classmethod
-    def update(
-        cls, instance: AlertReceiveChannel, alert_group_labels: IntegrationAlertGroupLabels | None
-    ) -> AlertReceiveChannel:
-        if alert_group_labels is None:
-            return instance
-
-        # update DB cache for custom labels
-        cls._create_custom_labels(instance.organization, alert_group_labels["custom"])
-        # save static labels as integration labels
-        # todo: it's needed to cover delay between backend and frontend rollout, and can be removed later
-        cls._save_static_labels_as_integration_labels(instance, alert_group_labels["custom"])
-        # update custom labels
-        instance.alert_group_labels_custom = cls._custom_labels_to_internal_value(alert_group_labels["custom"])
-
-        # update template
-        instance.alert_group_labels_template = alert_group_labels["template"]
-
-        instance.save(update_fields=["alert_group_labels_custom", "alert_group_labels_template"])
-        return instance
-
-    @staticmethod
-    def _create_custom_labels(organization: Organization, labels: AlertGroupCustomLabelsAPI) -> None:
-        """Create LabelKeyCache and LabelValueCache objects for custom labels."""
-
-        label_keys = [
-            LabelKeyCache(
-                id=label["key"]["id"],
-                name=label["key"]["name"],
-                prescribed=label["key"]["prescribed"],
-                organization=organization,
-            )
-            for label in labels
-        ]
-
-        label_values = [
-            LabelValueCache(
-                id=label["value"]["id"],
-                name=label["value"]["name"],
-                prescribed=label["value"]["prescribed"],
-                key_id=label["key"]["id"],
-            )
-            for label in labels
-            if label["value"]["id"]  # don't create LabelValueCache objects for templated labels
-        ]
-
-        LabelKeyCache.objects.bulk_create(label_keys, ignore_conflicts=True, batch_size=5000)
-        LabelValueCache.objects.bulk_create(label_values, ignore_conflicts=True, batch_size=5000)
-
-    @staticmethod
-    def _save_static_labels_as_integration_labels(instance: AlertReceiveChannel, labels: AlertGroupCustomLabelsAPI):
-        labels_associations_to_create = []
-        labels_copy = labels[:]
-        for label in labels_copy:
-            if label["value"]["id"] is not None:
-                labels_associations_to_create.append(
-                    AlertReceiveChannelAssociatedLabel(
-                        key_id=label["key"]["id"],
-                        value_id=label["value"]["id"],
-                        organization=instance.organization,
-                        alert_receive_channel=instance,
-                    )
-                )
-                labels.remove(label)
-        AlertReceiveChannelAssociatedLabel.objects.bulk_create(
-            labels_associations_to_create, ignore_conflicts=True, batch_size=5000
-        )
-
-    @classmethod
-    def to_representation(cls, instance: AlertReceiveChannel) -> IntegrationAlertGroupLabels:
+    def to_representation(self, instance: AlertReceiveChannel) -> IntegrationAlertGroupLabels:
        """
        The API representation of alert group labels is very different from the underlying model.

@@ -200,20 +117,28 @@ class IntegrationAlertGroupLabelsSerializer(serializers.Serializer):
        return {
            # todo: "inheritable" field is deprecated, remove in a future release.
            "inheritable": {label.key_id: True for label in instance.labels.all()},
-            "custom": cls._custom_labels_to_representation(instance.alert_group_labels_custom),
+            "custom": self._custom_labels_to_representation(instance.alert_group_labels_custom),
            "template": instance.alert_group_labels_template,
        }

-    @staticmethod
-    def _custom_labels_to_internal_value(
-        custom_labels: AlertGroupCustomLabelsAPI,
-    ) -> AlertReceiveChannel.DynamicLabelsConfigDB:
-        """Convert custom labels from API representation to the schema used by the JSONField on the model."""
-
-        return [
-            [label["key"]["id"], label["value"]["id"], None if label["value"]["id"] else label["value"]["name"]]
-            for label in custom_labels
-        ]
+    def to_internal_value(self, validated_data: dict) -> dict:
+        """
+        to_internal_value converts dynamic labels from API format to internal format and updates labels cache
+        """
+        alert_group_labels = self._pop_alert_group_labels(validated_data)
+        if alert_group_labels is None:
+            return validated_data
+
+        organization = self.context["request"].auth.organization
+        self._create_custom_labels(organization, alert_group_labels["custom"] if alert_group_labels else [])
+
+        custom_labels = (
+            self._custom_labels_to_internal_value(alert_group_labels["custom"]) if alert_group_labels else []
+        )
+        validated_data["alert_group_labels_custom"] = custom_labels or None
+        validated_data["alert_group_labels_template"] = alert_group_labels["template"] if alert_group_labels else None
+
+        return validated_data

    @staticmethod
    def _custom_labels_to_representation(

@@ -262,6 +187,63 @@ class IntegrationAlertGroupLabelsSerializer(serializers.Serializer):
            if key_id in label_key_index and (value_id in label_value_index or not value_id)
        ]

+    @staticmethod
+    def _custom_labels_to_internal_value(
+        custom_labels: AlertGroupCustomLabelsAPI,
+    ) -> AlertReceiveChannel.DynamicLabelsConfigDB:
+        """
+        Convert dynamic labels from API representation to the schema used by the JSONField on the model:
+        [[key.id, None, template(stored in value.name here)]].
+        """
+
+        return [
+            [label["key"]["id"], None, label["value"]["name"]]
+            for label in custom_labels
+            if label["value"]["id"] is None
+            # value.id is not None for deprecated static labels, for dynamic labels it's always None
+        ]
+
+    @staticmethod
+    def _pop_alert_group_labels(validated_data: dict) -> IntegrationAlertGroupLabels | None:
+        # the "alert_group_labels" field is optional, so either all 2 fields (custom and template) are present or none
+        # "inheritable" field is deprecated
+        if "custom" not in validated_data:
+            return None
+
+        return {
+            "inheritable": validated_data.pop("inheritable", None),  # deprecated
+            "custom": validated_data.pop("custom"),
+            "template": validated_data.pop("template"),
+        }
+
+    @staticmethod
+    def _create_custom_labels(organization: Organization, labels: AlertGroupCustomLabelsAPI) -> None:
+        """Create LabelKeyCache and LabelValueCache objects for labels used in labelsSchema"""
+
+        label_keys = [
+            LabelKeyCache(
+                id=label["key"]["id"],
+                name=label["key"]["name"],
+                prescribed=label["key"]["prescribed"],
+                organization=organization,
+            )
+            for label in labels
+        ]
+
+        label_values = [
+            LabelValueCache(
+                id=label["value"]["id"],
+                name=label["value"]["name"],
+                prescribed=label["value"]["prescribed"],
+                key_id=label["key"]["id"],
+            )
+            for label in labels
+            if label["value"]["id"]  # don't create LabelValueCache objects for templated labels
+        ]
+
+        LabelKeyCache.objects.bulk_create(label_keys, ignore_conflicts=True, batch_size=5000)
+        LabelValueCache.objects.bulk_create(label_values, ignore_conflicts=True, batch_size=5000)
+

class AlertReceiveChannelSerializer(
    EagerLoadingMixin, LabelsSerializerMixin, serializers.ModelSerializer[AlertReceiveChannel]
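The conversion rule in `_custom_labels_to_internal_value` is easy to check in isolation. A self-contained restatement as a plain function, with illustrative data:

```python
# Dynamic labels (value.id is None) become [key_id, None, template];
# deprecated static labels (value.id set) are dropped.
def custom_labels_to_internal_value(custom_labels):
    return [
        [label["key"]["id"], None, label["value"]["name"]]
        for label in custom_labels
        if label["value"]["id"] is None
    ]

labels = [
    {"key": {"id": "severity"}, "value": {"id": "high", "name": "high"}},            # static: skipped
    {"key": {"id": "service"}, "value": {"id": None, "name": "{{ payload.svc }}"}},  # dynamic: kept
]
assert custom_labels_to_internal_value(labels) == [["service", None, "{{ payload.svc }}"]]
```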
@@ -411,9 +393,8 @@ class AlertReceiveChannelSerializer(
            if _integration.slug == integration:
                is_able_to_autoresolve = _integration.is_able_to_autoresolve

-        # pop associated labels and alert group labels, so they are not passed to AlertReceiveChannel.create
+        # pop associated labels, so they are not passed to AlertReceiveChannel.create. They will be created later.
        labels = validated_data.pop("labels", None)
-        alert_group_labels = IntegrationAlertGroupLabelsSerializer.pop_alert_group_labels(validated_data)

        try:
            instance = AlertReceiveChannel.create(

@@ -425,14 +406,16 @@ class AlertReceiveChannelSerializer(
        except AlertReceiveChannel.DuplicateDirectPagingError:
            raise BadRequest(detail=AlertReceiveChannel.DuplicateDirectPagingError.DETAIL)

-        # Create label associations first, then update alert group labels
+        # Create label associations
        self.update_labels_association_if_needed(labels, instance, organization)
-        instance = IntegrationAlertGroupLabelsSerializer.update(instance, alert_group_labels)

        # Create default webhooks if needed
        if create_default_webhooks and hasattr(instance.config, "create_default_webhooks"):
            instance.config.create_default_webhooks(instance)

+        # Create default service_name label
+        instance.create_service_name_dynamic_label()
+
        return instance

    def update(self, instance, validated_data):

@@ -440,11 +423,6 @@ class AlertReceiveChannelSerializer(
        labels = validated_data.pop("labels", None)
        self.update_labels_association_if_needed(labels, instance, self.context["request"].auth.organization)

-        # update alert group labels
-        instance = IntegrationAlertGroupLabelsSerializer.update(
-            instance, IntegrationAlertGroupLabelsSerializer.pop_alert_group_labels(validated_data)
-        )
-
        try:
            updated_instance = super().update(instance, validated_data)
        except AlertReceiveChannel.DuplicateDirectPagingError:
@@ -2,8 +2,13 @@ from rest_framework import serializers

from apps.api.serializers.slack_channel import SlackChannelSerializer
from apps.api.serializers.user_group import UserGroupSerializer
+from apps.schedules.constants import SCHEDULE_CHECK_NEXT_DAYS
from apps.schedules.models import OnCallSchedule
-from apps.schedules.tasks import schedule_notify_about_empty_shifts_in_schedule, schedule_notify_about_gaps_in_schedule
+from apps.schedules.tasks import (
+    check_gaps_and_empty_shifts_in_schedule,
+    schedule_notify_about_empty_shifts_in_schedule,
+    schedule_notify_about_gaps_in_schedule,
+)
from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
from common.api_helpers.mixins import EagerLoadingMixin
from common.api_helpers.utils import CurrentOrganizationDefault

@@ -44,8 +49,8 @@ class ScheduleBaseSerializer(EagerLoadingMixin, serializers.ModelSerializer):
        "Cannot update the user group, make sure to grant user group modification rights to "
        "non-admin users in Slack workspace settings"
    )
-    SCHEDULE_HAS_GAPS_WARNING = "Schedule has unassigned time periods during next 7 days"
-    SCHEDULE_HAS_EMPTY_SHIFTS_WARNING = "Schedule has empty shifts during next 7 days"
+    SCHEDULE_HAS_GAPS_WARNING = f"Schedule has unassigned time periods during next {SCHEDULE_CHECK_NEXT_DAYS} days"
+    SCHEDULE_HAS_EMPTY_SHIFTS_WARNING = f"Schedule has empty shifts during next {SCHEDULE_CHECK_NEXT_DAYS} days"

    def get_warnings(self, obj):
        can_update_user_groups = self.context.get("can_update_user_groups", False)

@@ -81,7 +86,7 @@ class ScheduleBaseSerializer(EagerLoadingMixin, serializers.ModelSerializer):

    def create(self, validated_data):
        created_schedule = super().create(validated_data)
-        created_schedule.check_gaps_and_empty_shifts_for_next_week()
+        check_gaps_and_empty_shifts_in_schedule.apply_async((created_schedule.pk,))
        schedule_notify_about_empty_shifts_in_schedule.apply_async((created_schedule.pk,))
        schedule_notify_about_gaps_in_schedule.apply_async((created_schedule.pk,))
        return created_schedule
@@ -2,7 +2,11 @@ from rest_framework import serializers

from apps.api.serializers.schedule_base import ScheduleBaseSerializer
from apps.schedules.models import OnCallScheduleCalendar
-from apps.schedules.tasks import schedule_notify_about_empty_shifts_in_schedule, schedule_notify_about_gaps_in_schedule
+from apps.schedules.tasks import (
+    check_gaps_and_empty_shifts_in_schedule,
+    schedule_notify_about_empty_shifts_in_schedule,
+    schedule_notify_about_gaps_in_schedule,
+)
from apps.slack.models import SlackChannel, SlackUserGroup
from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField, TimeZoneField
from common.api_helpers.utils import validate_ical_url

@@ -58,7 +62,7 @@ class ScheduleCalendarCreateSerializer(ScheduleCalendarSerializer):
            or old_enable_web_overrides != updated_enable_web_overrides
        ):
            updated_schedule.drop_cached_ical()
-            updated_schedule.check_gaps_and_empty_shifts_for_next_week()
+            check_gaps_and_empty_shifts_in_schedule.apply_async((instance.pk,))
            schedule_notify_about_empty_shifts_in_schedule.apply_async((instance.pk,))
            schedule_notify_about_gaps_in_schedule.apply_async((instance.pk,))
        return updated_schedule
@@ -1,6 +1,7 @@
from apps.api.serializers.schedule_base import ScheduleBaseSerializer
from apps.schedules.models import OnCallScheduleICal
from apps.schedules.tasks import (
+    check_gaps_and_empty_shifts_in_schedule,
    refresh_ical_final_schedule,
    schedule_notify_about_empty_shifts_in_schedule,
    schedule_notify_about_gaps_in_schedule,

@@ -87,7 +88,7 @@ class ScheduleICalUpdateSerializer(ScheduleICalCreateSerializer):

        if old_ical_url_primary != updated_ical_url_primary or old_ical_url_overrides != updated_ical_url_overrides:
            updated_schedule.drop_cached_ical()
-            updated_schedule.check_gaps_and_empty_shifts_for_next_week()
+            check_gaps_and_empty_shifts_in_schedule.apply_async((instance.pk,))
            schedule_notify_about_empty_shifts_in_schedule.apply_async((instance.pk,))
            schedule_notify_about_gaps_in_schedule.apply_async((instance.pk,))
            # for iCal-based schedules we need to refresh final schedule information
@@ -1,6 +1,10 @@
from apps.api.serializers.schedule_base import ScheduleBaseSerializer
from apps.schedules.models import OnCallScheduleWeb
-from apps.schedules.tasks import schedule_notify_about_empty_shifts_in_schedule, schedule_notify_about_gaps_in_schedule
+from apps.schedules.tasks import (
+    check_gaps_and_empty_shifts_in_schedule,
+    schedule_notify_about_empty_shifts_in_schedule,
+    schedule_notify_about_gaps_in_schedule,
+)
from apps.slack.models import SlackChannel, SlackUserGroup
from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField, TimeZoneField

@@ -41,7 +45,7 @@ class ScheduleWebCreateSerializer(ScheduleWebSerializer):
        updated_time_zone = updated_schedule.time_zone
        if old_time_zone != updated_time_zone:
            updated_schedule.drop_cached_ical()
-            updated_schedule.check_gaps_and_empty_shifts_for_next_week()
+            check_gaps_and_empty_shifts_in_schedule.apply_async((instance.pk,))
            schedule_notify_about_empty_shifts_in_schedule.apply_async((instance.pk,))
            schedule_notify_about_gaps_in_schedule.apply_async((instance.pk,))
        return updated_schedule
@@ -2413,3 +2413,72 @@ def test_filter_default_started_at(
    )
    assert response.status_code == status.HTTP_200_OK
    assert response.json()["pk"] == old_alert_group.public_primary_key
+
+
+@pytest.mark.django_db
+def test_alert_group_affected_services(
+    alert_group_internal_api_setup,
+    make_user_for_organization,
+    make_user_auth_headers,
+    make_alert_group_label_association,
+    settings,
+):
+    settings.FEATURE_SERVICE_DEPENDENCIES_ENABLED = True
+    _, token, alert_groups = alert_group_internal_api_setup
+    resolved_ag, ack_ag, new_ag, silenced_ag = alert_groups
+    organization = new_ag.channel.organization
+    user = make_user_for_organization(organization)
+
+    # set firing alert group service label
+    make_alert_group_label_association(organization, new_ag, key_name="service_name", value_name="service-a")
+    # set other service name labels for other alert groups
+    make_alert_group_label_association(organization, ack_ag, key_name="service_name", value_name="service-2")
+    make_alert_group_label_association(organization, resolved_ag, key_name="service_name", value_name="service-3")
+    make_alert_group_label_association(organization, silenced_ag, key_name="service_name", value_name="service-4")
+
+    client = APIClient()
+    url = reverse("api-internal:alertgroup-filter-affected-services")
+
+    url = f"{url}?service=service-1&service=service-2&service=service-3&service=service-a"
+    response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+    assert response.status_code == status.HTTP_200_OK
+    expected = [
+        {
+            "name": "service-2",
+            "service_url": "a/grafana-slo-app/service/service-2",
+            "alert_groups_url": "a/grafana-oncall-app/alert-groups?status=0&status=1&started_at=now-7d_now&label=service_name:service-2",
+        },
+        {
+            "name": "service-a",
+            "service_url": "a/grafana-slo-app/service/service-a",
+            "alert_groups_url": "a/grafana-oncall-app/alert-groups?status=0&status=1&started_at=now-7d_now&label=service_name:service-a",
+        },
+    ]
+    assert response.json() == expected
+
+
+@pytest.mark.django_db
+def test_alert_group_service_dependencies_feature_not_enabled(
+    alert_group_internal_api_setup,
+    make_user_for_organization,
+    make_user_auth_headers,
+    make_alert_group_label_association,
+    settings,
+):
+    settings.FEATURE_SERVICE_DEPENDENCIES_ENABLED = False
+    _, token, alert_groups = alert_group_internal_api_setup
+    _, _, new_ag, _ = alert_groups
+    organization = new_ag.channel.organization
+    user = make_user_for_organization(organization)
+
+    # set firing alert group service label
+    make_alert_group_label_association(organization, new_ag, key_name="service_name", value_name="service-a")
+
+    client = APIClient()
+    url = reverse("api-internal:alertgroup-filter-affected-services")
+
+    url = f"{url}?service=service-1"
+    response = client.get(url, format="json", **make_user_auth_headers(user, token))
+
+    assert response.status_code == status.HTTP_404_NOT_FOUND
@@ -7,10 +7,12 @@ from rest_framework import serializers, status
from rest_framework.response import Response
from rest_framework.test import APIClient

+from apps.alerts.constants import SERVICE_LABEL, SERVICE_LABEL_TEMPLATE_FOR_ALERTING_INTEGRATION
from apps.alerts.grafana_alerting_sync_manager import GrafanaAlertingSyncManager
from apps.alerts.models import AlertReceiveChannel, EscalationPolicy
from apps.api.permissions import LegacyAccessControlRole
from apps.base.messaging import load_backend
-from apps.labels.models import LabelKeyCache, LabelValueCache
+from apps.labels.models import LabelKeyCache
from common.exceptions import BacksyncIntegrationRequestError
@@ -1717,24 +1719,20 @@ def test_alert_group_labels_put(
    label_3 = make_static_label_config(organization, alert_receive_channel)

    custom = [
-        # plain label
+        # static label (deprecated, will be skipped)
        {
            "key": {"id": label_2.key.id, "name": label_2.key.name, "prescribed": False},
            "value": {"id": label_2.value.id, "name": label_2.value.name, "prescribed": False},
        },
-        # plain label not present in DB cache
-        {
-            "key": {"id": "hello", "name": "world", "prescribed": False},
-            "value": {"id": "foo", "name": "bar", "prescribed": False},
-        },
-        # templated label
+        # dynamic label
        {
            "key": {"id": label_3.key.id, "name": label_3.key.name, "prescribed": False},
-            "value": {
-                "id": None,
-                "name": "{{ payload.foo }}",
-                "prescribed": False,
-            },
+            "value": {"id": None, "name": "{{ payload.foo }}", "prescribed": False},
        },
+        # dynamic label not present in DB cache
+        {
+            "key": {"id": "hello", "name": "world", "prescribed": False},
+            "value": {"id": None, "name": "{{ payload.bar }}", "prescribed": False},
+        },
    ]
    template = "{{ payload.labels | tojson }}"  # advanced template

@@ -1751,31 +1749,31 @@ def test_alert_group_labels_put(
    response = client.put(url, data, format="json", **make_user_auth_headers(user, token))

    assert response.status_code == status.HTTP_200_OK
-    # check static labels were saved as integration labels
    assert response.json()["alert_group_labels"] == {
-        "inheritable": {label_1.key_id: True, label_2.key_id: True, label_3.key_id: True, "hello": True},
+        "inheritable": {label_1.key_id: True, label_2.key_id: True, label_3.key_id: True},
        "custom": [
            {
                "key": {"id": label_3.key.id, "name": label_3.key.name, "prescribed": False},
                "value": {"id": None, "name": "{{ payload.foo }}", "prescribed": False},
-            }
+            },
+            {
+                "key": {"id": "hello", "name": "world", "prescribed": False},
+                "value": {"id": None, "name": "{{ payload.bar }}", "prescribed": False},
+            },
        ],
        "template": template,
    }

    alert_receive_channel.refresh_from_db()
-    # check static labels are not in the custom labels list
+    # check deprecated static label is not in the custom labels list
    assert alert_receive_channel.alert_group_labels_custom == [
        [label_3.key_id, None, "{{ payload.foo }}"],
+        ["hello", None, "{{ payload.bar }}"],
    ]
    assert alert_receive_channel.alert_group_labels_template == template
-    # check static labels were assigned to integration
-    assert alert_receive_channel.labels.filter(key_id__in=[label_2.key_id, "hello"]).count() == 2

-    # check label keys & values are created
-    key = LabelKeyCache.objects.filter(id="hello", name="world", organization=organization).first()
-    assert key is not None
-    assert LabelValueCache.objects.filter(key=key, id="foo", name="bar").exists()
+    # check label key is created
+    assert LabelKeyCache.objects.filter(id="hello", name="world", organization=organization).exists()


@pytest.mark.django_db
@@ -1850,6 +1848,130 @@ def test_alert_group_labels_post(alert_receive_channel_internal_api_setup, make_
    assert alert_receive_channel.alert_group_labels_template == "{{ payload.labels | tojson }}"


+@patch.object(GrafanaAlertingSyncManager, "check_for_connection_errors", return_value=None)
+@pytest.mark.django_db
+def test_create_service_name_label_for_new_alerting_integration(
+    _,
+    make_organization_and_user_with_plugin_token,
+    make_label_key,
+    make_user_auth_headers,
+):
+    """Test adding default `service_name` dynamic label for new alerting integration."""
+
+    organization, user, token = make_organization_and_user_with_plugin_token()
+    service_name_label_key = make_label_key(
+        organization=organization, key_id="test", key_name=SERVICE_LABEL, prescribed=True
+    )
+
+    client = APIClient()
+    url = reverse("api-internal:alert_receive_channel-list")
+
+    data = {
+        "integration": AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING,
+        "team": None,
+        "labels": [],
+        "alert_group_labels": {
+            "inheritable": {},
+            "custom": [
+                {
+                    "key": {"id": "testid", "name": "testname", "prescribed": False},
+                    "value": {"id": None, "name": "{{ payload.foo }}", "prescribed": False},
+                }
+            ],
+            "template": None,
+        },
+    }
+    expected_alert_group_labels_response = {
+        "inheritable": {},
+        "custom": [
+            {
+                "key": {"id": service_name_label_key.id, "name": SERVICE_LABEL, "prescribed": True},
+                "value": {"id": None, "name": SERVICE_LABEL_TEMPLATE_FOR_ALERTING_INTEGRATION, "prescribed": False},
+            },
+            {
+                "key": {"id": "testid", "name": "testname", "prescribed": False},
+                "value": {"id": None, "name": "{{ payload.foo }}", "prescribed": False},
+            },
+        ],
+        "template": None,
+    }
+    expected_alert_group_labels = [
+        [service_name_label_key.id, None, SERVICE_LABEL_TEMPLATE_FOR_ALERTING_INTEGRATION],
+        ["testid", None, "{{ payload.foo }}"],
+    ]
+
+    response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+
+    assert response.status_code == status.HTTP_201_CREATED
+    assert response.json()["alert_group_labels"] == expected_alert_group_labels_response
+
+    alert_receive_channel = organization.alert_receive_channels.filter(public_primary_key=response.json()["id"]).first()
+
+    assert alert_receive_channel is not None
+    assert alert_receive_channel.alert_group_labels_custom == expected_alert_group_labels
+
+
+@patch.object(GrafanaAlertingSyncManager, "check_for_connection_errors", return_value=None)
+@pytest.mark.django_db
+def test_skip_creating_service_name_label_for_new_alerting_integration(
+    _,
+    make_organization_and_user_with_plugin_token,
+    make_label_key,
+    make_user_auth_headers,
+):
+    """
+    Test skipping adding default `service_name` dynamic label for new alerting integration,
+    when this label was already added by user
+    """
+
+    organization, user, token = make_organization_and_user_with_plugin_token()
+    service_name_label_key = make_label_key(
+        organization=organization, key_id="test", key_name=SERVICE_LABEL, prescribed=True
+    )
+
+    client = APIClient()
+    url = reverse("api-internal:alert_receive_channel-list")
+
+    data = {
+        "integration": AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING,
+        "team": None,
+        "labels": [],
+        "alert_group_labels": {
+            "inheritable": {},
+            "custom": [
+                {
+                    "key": {"id": service_name_label_key.id, "name": SERVICE_LABEL, "prescribed": True},
+                    "value": {"id": None, "name": "{{ payload.foo }}", "prescribed": False},
+                }
+            ],
+            "template": None,
+        },
+    }
+    expected_alert_group_labels_response = {
+        "inheritable": {},
+        "custom": [
+            {
+                "key": {"id": service_name_label_key.id, "name": SERVICE_LABEL, "prescribed": True},
+                "value": {"id": None, "name": "{{ payload.foo }}", "prescribed": False},
+            }
+        ],
+        "template": None,
+    }
+    expected_alert_group_labels = [
+        [service_name_label_key.id, None, "{{ payload.foo }}"],
+    ]
+
+    response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
+
+    assert response.status_code == status.HTTP_201_CREATED
+    assert response.json()["alert_group_labels"] == expected_alert_group_labels_response
+
+    alert_receive_channel = organization.alert_receive_channels.filter(public_primary_key=response.json()["id"]).first()
+
+    assert alert_receive_channel is not None
+    assert alert_receive_channel.alert_group_labels_custom == expected_alert_group_labels
+
+
@pytest.mark.django_db
def test_team_not_updated_if_not_in_data(
    make_organization_and_user_with_plugin_token,
@@ -511,6 +511,7 @@ def test_escalation_policy_move_to_position_permissions(
        (EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT, EscalationPolicy.STEP_NOTIFY_GROUP),
        (EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT, EscalationPolicy.STEP_NOTIFY_SCHEDULE),
        (EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT, EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS),
        (EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT, EscalationPolicy.STEP_NOTIFY_USERS_QUEUE),
    ],
)
def test_escalation_policy_maps_default_to_important(

@@ -545,6 +546,7 @@ def test_escalation_policy_maps_default_to_important(
        EscalationPolicy.STEP_NOTIFY_GROUP,
        EscalationPolicy.STEP_NOTIFY_SCHEDULE,
        EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
        EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
    ],
)
def test_escalation_policy_default_steps_stay_default(

@@ -578,6 +580,7 @@ def test_escalation_policy_default_steps_stay_default(
        (EscalationPolicy.STEP_NOTIFY_GROUP, EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT),
        (EscalationPolicy.STEP_NOTIFY_SCHEDULE, EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT),
        (EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS, EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT),
        (EscalationPolicy.STEP_NOTIFY_USERS_QUEUE, EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT),
    ],
)
def test_create_escalation_policy_important(

@@ -615,6 +618,7 @@ def test_create_escalation_policy_important(
        EscalationPolicy.STEP_NOTIFY_GROUP,
        EscalationPolicy.STEP_NOTIFY_SCHEDULE,
        EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
        EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
    ],
)
def test_create_escalation_policy_default(

@@ -84,6 +84,30 @@ def test_get_update_key_put(
    assert response.json() == expected_result


@patch(
    "apps.labels.client.LabelsAPIClient.get_label_by_key_name",
    return_value=(
        {"key": {"id": "keyid123", "name": "keyname12"}, "values": [{"id": "valueid123", "name": "yolo"}]},
        MockResponse(status_code=200),
    ),
)
@pytest.mark.django_db
def test_get_key_by_name(
    mocked_get_label_by_key_name,
    make_organization_and_user_with_plugin_token,
    make_user_auth_headers,
):
    _, user, token = make_organization_and_user_with_plugin_token()
    client = APIClient()
    url = reverse("api-internal:get_key_by_name", kwargs={"key_name": "keyname12"})
    response = client.get(url, format="json", **make_user_auth_headers(user, token))
    expected_result = {"key": {"id": "keyid123", "name": "keyname12"}, "values": [{"id": "valueid123", "name": "yolo"}]}

    assert mocked_get_label_by_key_name.called
    assert response.status_code == status.HTTP_200_OK
    assert response.json() == expected_result


@patch(
    "apps.labels.client.LabelsAPIClient.add_value",
    return_value=(

@@ -127,6 +127,11 @@ urlpatterns += [
        LabelsViewSet.as_view({"get": "get_key", "put": "rename_key"}),
        name="get_update_key",
    ),
    re_path(
        r"^labels/name/(?P<key_name>[\w\-]+)/?$",
        LabelsViewSet.as_view({"get": "get_key_by_name"}),
        name="get_key_by_name",
    ),
    re_path(
        r"^labels/id/(?P<key_id>[\w\-]+)/values/?$", LabelsViewSet.as_view({"post": "add_value"}), name="add_value"
    ),

@@ -26,6 +26,7 @@ from apps.api.serializers.alert_group_escalation_snapshot import AlertGroupEscal
from apps.api.serializers.team import TeamSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.base.models.user_notification_policy_log_record import UserNotificationPolicyLogRecord
from apps.grafana_plugin.ui_url_builder import UIURLBuilder
from apps.labels.utils import is_labels_feature_enabled
from apps.mobile_app.auth import MobileAppAuthTokenAuthentication
from apps.user_management.models import Team, User

@@ -283,6 +284,7 @@ class AlertGroupView(
        "bulk_action": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "preview_template": [RBACPermission.Permissions.INTEGRATIONS_TEST],
        "escalation_snapshot": [RBACPermission.Permissions.ALERT_GROUPS_READ],
        "filter_affected_services": [RBACPermission.Permissions.ALERT_GROUPS_READ],
    }

    queryset = AlertGroup.objects.none()  # needed for drf-spectacular introspection

@@ -299,9 +301,18 @@ class AlertGroupView(

        return super().get_serializer_class()

    def get_queryset(self, ignore_filtering_by_available_teams=False):
        # no select_related or prefetch_related is used at this point, it will be done on paginate_queryset.

    def _get_queryset(
        self,
        action=None,
        ignore_filtering_by_available_teams=False,
        team_values=None,
        started_at=None,
        label_query=None,
    ):
        # make base get_queryset reusable via params
        if action is None:
            # assume stats by default
            action = "stats"
        alert_receive_channels_qs = AlertReceiveChannel.objects_with_deleted.filter(
            organization_id=self.request.auth.organization.id
        )

@@ -310,7 +321,6 @@ class AlertGroupView(

        # Filter by team(s). Since we really filter teams from integrations, this is not an AlertGroup model filter.
        # This is based on the common.api_helpers.ByTeamModelFieldFilterMixin implementation
        team_values = self.request.query_params.getlist("team", [])
        if team_values:
            null_team_lookup = Q(team__isnull=True) if NO_TEAM_VALUE in team_values else None
            teams_lookup = Q(team__public_primary_key__in=[ppk for ppk in team_values if ppk != NO_TEAM_VALUE])

@@ -321,10 +331,10 @@ class AlertGroupView(
        alert_receive_channels_ids = list(alert_receive_channels_qs.values_list("id", flat=True))
        queryset = AlertGroup.objects.filter(channel__in=alert_receive_channels_ids)

        if self.action in ("list", "stats") and not self.request.query_params.get("started_at"):
        if action in ("list", "stats") and not started_at:
            queryset = queryset.filter(started_at__gte=timezone.now() - timezone.timedelta(days=30))

        if self.action in ("list", "stats") and settings.ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX:
        if action in ("list", "stats") and settings.ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX:
            # workaround related to MySQL "ORDER BY LIMIT Query Optimizer Bug"
            # read more: https://hackmysql.com/infamous-order-by-limit-query-optimizer-bug/
            from django_mysql.models import add_QuerySetMixin

@@ -333,18 +343,28 @@ class AlertGroupView(
            queryset = queryset.force_index("alert_group_list_index")

        # Filter by labels. Since alert group labels are "static", filter by names, not IDs.
        label_query = self.request.query_params.getlist("label", [])
        kv_pairs = parse_label_query(label_query)
        for key, value in kv_pairs:
            # Utilize (organization, key_name, value_name, alert_group) index on AlertGroupAssociatedLabel
            queryset = queryset.filter(
                labels__organization=self.request.auth.organization,
                labels__key_name=key,
                labels__value_name=value,
            )
        if label_query:
            kv_pairs = parse_label_query(label_query)
            for key, value in kv_pairs:
                # Utilize (organization, key_name, value_name, alert_group) index on AlertGroupAssociatedLabel
                queryset = queryset.filter(
                    labels__organization=self.request.auth.organization,
                    labels__key_name=key,
                    labels__value_name=value,
                )

        return queryset

    def get_queryset(self, ignore_filtering_by_available_teams=False):
        # no select_related or prefetch_related is used at this point, it will be done on paginate_queryset.
        return self._get_queryset(
            action=self.action,
            ignore_filtering_by_available_teams=ignore_filtering_by_available_teams,
            team_values=self.request.query_params.getlist("team", []),
            started_at=self.request.query_params.get("started_at"),
            label_query=self.request.query_params.getlist("label", []),
        )

    def get_object(self):
        obj = super().get_object()
        obj = self.enrich([obj])[0]

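Side note: the `label` query parameter consumed above carries raw `key:value` strings. A minimal sketch of the parsing contract this filter assumes (hypothetical; the real `parse_label_query` helper may handle edge cases differently):

```python
# Hypothetical sketch of the parsing contract assumed by the label filter above.
def parse_label_query(label_query):
    # ["service_name:checkout", "env:prod"] -> [("service_name", "checkout"), ("env", "prod")]
    pairs = []
    for raw in label_query:
        key, sep, value = raw.partition(":")
        if sep:  # skip malformed entries that have no colon
            pairs.append((key, value))
    return pairs
```
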
@@ -881,3 +901,48 @@ class AlertGroupView(
        escalation_snapshot = alert_group.escalation_snapshot
        result = AlertGroupEscalationSnapshotAPISerializer(escalation_snapshot).data if escalation_snapshot else {}
        return Response(result)

    @extend_schema(
        responses=inline_serializer(
            name="AffectedServices",
            fields={
                "name": serializers.CharField(),
                "service_url": serializers.CharField(),
                "alert_groups_url": serializers.CharField(),
            },
            many=True,
        )
    )
    @action(methods=["get"], detail=False)
    def filter_affected_services(self, request):
        """Given a list of service names, return the ones that have active alerts."""
        if not settings.FEATURE_SERVICE_DEPENDENCIES_ENABLED:
            raise NotFound
        organization = self.request.auth.organization
        services = self.request.query_params.getlist("service", [])
        url_builder = UIURLBuilder(organization)
        affected_services = []
        days_to_check = 7
        for service_name in services:
            is_affected = (
                self._get_queryset(
                    started_at=timezone.now() - timezone.timedelta(days=days_to_check),
                    label_query=[f"service_name:{service_name}"],
                )
                .filter(
                    resolved=False,
                    silenced=False,
                )
                .exists()
            )
            if is_affected:
                affected_services.append(
                    {
                        "name": service_name,
                        "service_url": url_builder.service_page(service_name),
                        "alert_groups_url": url_builder.alert_groups(
                            f"?status=0&status=1&started_at=now-{days_to_check}d_now&label=service_name:{service_name}"
                        ),
                    }
                )
        return Response(affected_services)

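A rough usage sketch for the new endpoint; the host, URL prefix, and auth header below are assumptions for illustration, while the response shape follows the `inline_serializer` above:

```python
# Hypothetical client call; host, URL prefix and auth header are placeholders.
import requests

auth_headers = {"Authorization": "<plugin auth token>"}  # assumed auth scheme
resp = requests.get(
    "https://oncall.example.com/api/internal/v1/alertgroups/filter_affected_services/",
    params=[("service", "checkout"), ("service", "payments")],
    headers=auth_headers,
)
# Expected shape, per the inline_serializer above:
# [{"name": "checkout",
#   "service_url": ".../a/grafana-slo-app/service/checkout",
#   "alert_groups_url": ".../alert-groups?status=0&status=1&started_at=now-7d_now&label=service_name:checkout"}]
```
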
@@ -312,7 +312,7 @@ class AlertReceiveChannelView(
        if instance is None:
            # pop extra fields so they are not passed to AlertReceiveChannel(**serializer.validated_data)
            serializer.validated_data.pop("create_default_webhooks", None)
            IntegrationAlertGroupLabelsSerializer.pop_alert_group_labels(serializer.validated_data)
            IntegrationAlertGroupLabelsSerializer._pop_alert_group_labels(serializer.validated_data)

            # create in-memory instance to test with the (possible) unsaved data
            instance = AlertReceiveChannel(**serializer.validated_data)

@@ -26,6 +26,7 @@ class Feature(enum.StrEnum):
    GRAFANA_ALERTING_V2 = "grafana_alerting_v2"
    LABELS = "labels"
    GOOGLE_OAUTH2 = "google_oauth2"
    SERVICE_DEPENDENCIES = "service_dependencies"


class FeaturesAPIView(APIView):

@@ -72,4 +73,7 @@ class FeaturesAPIView(APIView):
        if settings.GOOGLE_OAUTH2_ENABLED:
            enabled_features.append(Feature.GOOGLE_OAUTH2)

        if settings.FEATURE_SERVICE_DEPENDENCIES_ENABLED:
            enabled_features.append(Feature.SERVICE_DEPENDENCIES)

        return enabled_features

@@ -17,6 +17,7 @@ from apps.api.serializers.labels import (
from apps.auth_token.auth import PluginAuthentication
from apps.labels.client import LabelsAPIClient, LabelsRepoAPIException
from apps.labels.tasks import update_instances_labels_cache, update_label_option_cache
from apps.labels.types import LabelOption
from apps.labels.utils import is_labels_feature_enabled
from common.api_helpers.exceptions import BadRequest

@@ -44,6 +45,7 @@ class LabelsViewSet(LabelsFeatureFlagViewSet):
        "rename_value": [RBACPermission.Permissions.LABEL_WRITE],
        "get_keys": [RBACPermission.Permissions.LABEL_READ],
        "get_key": [RBACPermission.Permissions.LABEL_READ],
        "get_key_by_name": [RBACPermission.Permissions.LABEL_READ],
        "get_value": [RBACPermission.Permissions.LABEL_READ],
    }

@@ -66,6 +68,18 @@ class LabelsViewSet(LabelsFeatureFlagViewSet):
        self._update_labels_cache(label_option)
        return Response(label_option, status=response.status_code)

    @extend_schema(responses=LabelOptionSerializer)
    def get_key_by_name(self, request, key_name):
        """
        get_key_by_name returns a LabelOption: the key together with its list of values.
        """
        organization = self.request.auth.organization
        label_option, response = LabelsAPIClient(
            organization.grafana_url,
            organization.api_token,
        ).get_label_by_key_name(key_name)
        return Response(label_option, status=response.status_code)

    @extend_schema(responses=LabelValueSerializer)
    def get_value(self, request, key_id, value_id):
        """get_value returns a Value"""

@@ -133,7 +147,7 @@ class LabelsViewSet(LabelsFeatureFlagViewSet):
        self._update_labels_cache(label_option)
        return Response(label_option, status=status)

    def _update_labels_cache(self, label_option):
    def _update_labels_cache(self, label_option: LabelOption):
        if not label_option:
            return
        serializer = LabelOptionSerializer(data=label_option)

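For reference, a minimal sketch of calling the new client method directly, assuming an `organization` with `grafana_url` and `api_token` is in scope; the payload shape matches the mocked response in the test above:

```python
# Minimal usage sketch; organization is assumed to be in scope.
from apps.labels.client import LabelsAPIClient

label_option, response = LabelsAPIClient(
    organization.grafana_url,
    organization.api_token,
).get_label_by_key_name("keyname12")
# label_option, per the mocked response in the test above:
# {"key": {"id": "keyid123", "name": "keyname12"},
#  "values": [{"id": "valueid123", "name": "yolo"}]}
```
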
@@ -481,7 +481,7 @@ class ScheduleView(
    def reload_ical(self, request, pk):
        schedule = self.get_object(annotate=False)
        schedule.drop_cached_ical()
        schedule.check_gaps_and_empty_shifts_for_next_week()
        schedule.check_gaps_and_empty_shifts_for_next_days()

        if schedule.user_group is not None:
            update_slack_user_group_for_schedules.apply_async((schedule.user_group.pk,))

@@ -567,7 +567,7 @@ class ScheduleView(
            },
            {
                "value": True,
                "display_name": "Mention person in slack",
                "display_name": "Mention person in Slack",
            },
        ]
        return Response(options)

@@ -103,3 +103,9 @@ def test_build_url_overriden_base_url(org_setup):
@pytest.mark.django_db
def test_build_url_works_for_irm_and_oncall_plugins(org_setup, is_grafana_irm_enabled, expected_url):
    assert UIURLBuilder(org_setup(is_grafana_irm_enabled)).alert_group_detail(ALERT_GROUP_ID) == expected_url


@pytest.mark.django_db
def test_build_url_service_detail_page(org_setup):
    builder = UIURLBuilder(org_setup())
    assert builder.service_page("service-a") == f"{GRAFANA_URL}/a/{PluginID.SLO}/service/service-a"

@@ -56,3 +56,6 @@ class UIURLBuilder:

    def declare_incident(self, path_extra: str = "") -> str:
        return self._build_url("incidents/declare", path_extra, plugin_id=PluginID.INCIDENT)

    def service_page(self, service_name: str, path_extra: str = "") -> str:
        return self._build_url(f"service/{service_name}", path_extra, plugin_id=PluginID.SLO)

@@ -19,9 +19,14 @@ LABEL_VALUE_TYPES = (str, int, float, bool)
MAX_LABELS_PER_ALERT_GROUP = 15


def gather_labels_from_alert_receive_channel_and_raw_request_data(
def gather_alert_labels(
    alert_receive_channel: "AlertReceiveChannel", raw_request_data: "Alert.RawRequestData"
) -> typing.Optional[types.AlertLabels]:
    """
    gather_alert_labels gathers labels for an alert received by the alert receive channel:
    1. static labels, inherited from the integration;
    2. dynamic labels and the multi-label extraction template, rendered from raw_request_data.
    """
    if not is_labels_feature_enabled(alert_receive_channel.organization):
        return None

@@ -37,7 +42,7 @@ def gather_labels_from_alert_receive_channel_and_raw_request_data(
    return labels


def assign_labels(
def save_alert_group_labels(
    alert_group: "AlertGroup", alert_receive_channel: "AlertReceiveChannel", labels: typing.Optional[types.AlertLabels]
) -> None:
    from apps.labels.models import AlertGroupAssociatedLabel

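To make the docstring concrete, a hedged sketch of the two label sources; the exact shapes are assumptions based on how labels are filtered by name elsewhere in this change:

```python
# Assumed shapes, for illustration only.
static_labels = {"env": "prod"}  # inherited from the integration as-is
raw_request_data = {"service": "checkout"}  # incoming alert payload

# A dynamic label renders its template against the payload, e.g.
# "{{ payload.service }}" -> "checkout", so the gathered result is roughly:
# {"env": "prod", "service_name": "checkout"}
```
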
@@ -65,6 +65,15 @@ class LabelsAPIClient:
        self._check_response(response)
        return response.json(), response

    def get_label_by_key_name(
        self, key_name: str
    ) -> typing.Tuple[typing.Optional["LabelOption"], requests.models.Response]:
        url = urljoin(self.api_url, f"name/{key_name}")

        response = requests.get(url, timeout=TIMEOUT, headers=self._request_headers)
        self._check_response(response)
        return response.json(), response

    def get_value(
        self, key_id: str, value_id: str
    ) -> typing.Tuple[typing.Optional["LabelValue"], requests.models.Response]:

@@ -1,8 +1,7 @@
# TODO: MOVE IT TO /migrations DIRECTORY IN FUTURE RELEASE

# Generated by Django 4.2.15 on 2024-11-26 13:37

from django.db import migrations
import django_migration_linter as linter

import common.migrations.remove_field

@@ -13,9 +12,10 @@ class Migration(migrations.Migration):
    ]

    operations = [
        linter.IgnoreMigration(),
        common.migrations.remove_field.RemoveFieldDB(
            model_name="AlertReceiveChannelAssociatedLabel",
            name="inheritable",
            remove_state_migration=("labels", "0007_remove_alertreceivechannelassociatedlabel_inheritable_state"),
            remove_state_migration=("labels", "0006_remove_alertreceivechannelassociatedlabel_inheritable_state"),
        ),
    ]

@@ -1,8 +1,11 @@
import logging
import typing
from json import JSONDecodeError

from django.db import models
from django.utils import timezone

from apps.labels.client import LabelsAPIClient, LabelsRepoAPIException
from apps.labels.tasks import update_label_pairs_cache
from apps.labels.types import LabelPair
from apps.labels.utils import LABEL_OUTDATED_TIMEOUT_MINUTES

@@ -10,6 +13,7 @@ from apps.labels.utils import LABEL_OUTDATED_TIMEOUT_MINUTES
if typing.TYPE_CHECKING:
    from apps.user_management.models import Organization

logger = logging.getLogger(__name__)

MAX_KEY_NAME_LENGTH = 200
MAX_VALUE_NAME_LENGTH = 200

@@ -26,6 +30,36 @@ class LabelKeyCache(models.Model):
    def is_outdated(self) -> bool:
        return timezone.now() - self.last_synced > timezone.timedelta(minutes=LABEL_OUTDATED_TIMEOUT_MINUTES)

    @classmethod
    def get_or_create_by_name(cls, organization: "Organization", key_name: str) -> typing.Optional["LabelKeyCache"]:
        """
        `get_or_create_by_name` tries to get the label key with the provided name from the cache.
        If there is no label key with this name in the cache, it tries to fetch it from the labels repo API.
        """
        label_key = cls.objects.filter(organization=organization, name=key_name).first()
        if label_key:
            return label_key

        # fetch label key from labels repo
        try:
            label, _ = LabelsAPIClient(organization.grafana_url, organization.api_token).get_label_by_key_name(
                key_name
            )
        except (LabelsRepoAPIException, JSONDecodeError) as e:
            logger.error(f"Failed to get or create label key {key_name} for organization {organization.id}: {e}")
            return None

        # save label key in cache
        label_key = LabelKeyCache(
            id=label["key"]["id"],
            name=label["key"]["name"],
            organization=organization,
            prescribed=label["key"]["prescribed"],
        )
        label_key.save()

        return label_key


class LabelValueCache(models.Model):
    id = models.CharField(primary_key=True, editable=False, max_length=36)

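A short usage sketch for the new classmethod, assuming an `organization` instance is in scope; `None` signals that the labels repo could not provide the key:

```python
# Minimal usage sketch; organization is assumed to exist.
label_key = LabelKeyCache.get_or_create_by_name(organization, "service_name")
if label_key is None:
    # repo API error or unparsable response; the caller decides how to degrade
    ...
else:
    print(label_key.id, label_key.name, label_key.prescribed)
```
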
@@ -8,12 +8,13 @@ from django.utils import timezone
from apps.labels.client import LabelsAPIClient, LabelsRepoAPIException
from apps.labels.types import LabelOption, LabelPair
from apps.labels.utils import LABEL_OUTDATED_TIMEOUT_MINUTES, get_associating_label_model
from apps.user_management.models import Organization
from common.custom_celery_tasks import shared_dedicated_queue_retry_task

logger = get_task_logger(__name__)
logger.setLevel(logging.DEBUG)

MAX_RETRIES = 1 if settings.DEBUG else 10


class KVPair(typing.TypedDict):
    value_name: str

@@ -129,11 +130,10 @@ def _update_labels_cache(values_id_to_pair: typing.Dict[str, LabelPair]):
    LabelValueCache.objects.bulk_update(values, fields=["name", "last_synced", "prescribed"])


@shared_dedicated_queue_retry_task(
    autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else 10
)
@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=MAX_RETRIES)
def update_instances_labels_cache(organization_id: int, instance_ids: typing.List[int], instance_model_name: str):
    from apps.labels.models import LabelValueCache
    from apps.user_management.models import Organization

    now = timezone.now()
    organization = Organization.objects.get(id=organization_id)

@@ -162,3 +162,69 @@ def update_instances_labels_cache(organization_id: int, instance_ids: typing.Lis
            continue
        if label_option:
            update_label_option_cache.apply_async((label_option,))


@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=MAX_RETRIES)
def add_service_label_for_alerting_integrations():
    """
    This task should be called manually and only once.
    It starts tasks that add the `service_name` dynamic label for Alerting integrations.
    """

    from apps.alerts.models import AlertReceiveChannel

    organization_ids = (
        AlertReceiveChannel.objects.filter(
            integration=AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING,
            organization__is_grafana_labels_enabled=True,
            organization__deleted_at__isnull=True,
        )
        .values_list("organization", flat=True)
        .distinct()
    )

    for idx, organization_id in enumerate(organization_ids):
        countdown = idx // 10  # spread start times so roughly 10 per-org tasks start each second
        add_service_label_per_org.apply_async((organization_id,), countdown=countdown)


@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=MAX_RETRIES)
def add_service_label_per_org(organization_id: int):
    """Add the `service_name` dynamic label for all Alerting integrations in the organization"""

    from apps.alerts.models import AlertReceiveChannel
    from apps.user_management.models import Organization

    organization = Organization.objects.get(id=organization_id)
    service_label_custom = AlertReceiveChannel._build_service_name_label_custom(organization)
    if not service_label_custom:
        return
    integrations = AlertReceiveChannel.objects.filter(
        integration=AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING,
        organization=organization,
    )
    integrations_to_update = []
    # add the service label to the integration's custom labels if it's not already there
    for integration in integrations:
        dynamic_service_label_exists = False
        dynamic_labels = integration.alert_group_labels_custom if integration.alert_group_labels_custom else []
        for label in dynamic_labels:
            if label[0] == service_label_custom[0]:
                dynamic_service_label_exists = True
                break
        if dynamic_service_label_exists:
            continue
        integration.alert_group_labels_custom = [service_label_custom] + dynamic_labels
        integrations_to_update.append(integration)

    AlertReceiveChannel.objects.bulk_update(integrations_to_update, fields=["alert_group_labels_custom"])


@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=MAX_RETRIES)
def add_service_label_for_integration(alert_receive_channel_id: int):
    """Add the `service_name` dynamic label for an Alerting integration"""

    from apps.alerts.models import AlertReceiveChannel

    alert_receive_channel = AlertReceiveChannel.objects.get(id=alert_receive_channel_id)
    alert_receive_channel.create_service_name_dynamic_label(True)

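The dynamic-label entries these tasks manipulate are `[key_id, value_id, template]` triples, with `value_id` set to `None` for templated values, as the new test below also shows. A small sketch of the dedupe-and-prepend step (IDs and template are made up):

```python
# Sketch of the prepend logic above; IDs and template are illustrative only.
service_label_custom = ["service_label_id", None, "{{ payload.service }}"]
dynamic_labels = [["test", None, "test_template"]]

# prepend the service label only if no existing entry shares its key id
if all(label[0] != service_label_custom[0] for label in dynamic_labels):
    dynamic_labels = [service_label_custom] + dynamic_labels
```
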
engine/apps/labels/tests/test_add_service_label.py (new file)

@@ -0,0 +1,40 @@
import pytest

from apps.alerts.constants import SERVICE_LABEL, SERVICE_LABEL_TEMPLATE_FOR_ALERTING_INTEGRATION
from apps.alerts.models import AlertReceiveChannel
from apps.labels.tasks import add_service_label_per_org


@pytest.mark.django_db
def test_add_service_label_per_org(make_organization, make_alert_receive_channel, make_label_key):
    organization = make_organization()
    alert_receive_channel_alerting_no_labels = make_alert_receive_channel(
        organization=organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING
    )
    alert_receive_channel_alerting_with_label = make_alert_receive_channel(
        organization=organization,
        integration=AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING,
        alert_group_labels_custom=[["test", None, "test_template"]],
    )
    alert_receive_channel_grafana = make_alert_receive_channel(
        organization=organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
    )
    service_name_label_key = make_label_key(organization, key_id="service_label_id", key_name=SERVICE_LABEL)

    expected_service_name_label = [service_name_label_key.id, None, SERVICE_LABEL_TEMPLATE_FOR_ALERTING_INTEGRATION]

    add_service_label_per_org(organization.id)

    for alert_receive_channel in [
        alert_receive_channel_alerting_no_labels,
        alert_receive_channel_alerting_with_label,
        alert_receive_channel_grafana,
    ]:
        alert_receive_channel.refresh_from_db()

    assert alert_receive_channel_alerting_no_labels.alert_group_labels_custom == [expected_service_name_label]
    assert alert_receive_channel_alerting_with_label.alert_group_labels_custom == [
        expected_service_name_label,
        ["test", None, "test_template"],
    ]
    assert alert_receive_channel_grafana.alert_group_labels_custom is None

@@ -1,9 +1,10 @@
from json import JSONDecodeError
from unittest.mock import call, patch

import pytest
from django.utils import timezone

from apps.labels.client import LabelsRepoAPIException
from apps.labels.client import LabelsAPIClient, LabelsRepoAPIException
from apps.labels.models import LabelKeyCache, LabelValueCache
from apps.labels.tasks import update_instances_labels_cache, update_labels_cache
from apps.labels.utils import LABEL_OUTDATED_TIMEOUT_MINUTES

@@ -158,3 +159,33 @@ def test_update_instances_labels_cache_error(make_organization, make_alert_recei
    )
    mock_get_label_by_key_id.assert_called_once_with(label_association.key_id)
    mock_update_cache.assert_not_called()


@pytest.mark.django_db
def test_get_or_create_label_key_cache_by_name(make_organization):
    organization = make_organization()
    label_key_data = {"id": "testid", "name": "testname", "prescribed": False}

    # test empty response from label repo (json decode error)
    with patch.object(LabelsAPIClient, "get_label_by_key_name", side_effect=JSONDecodeError("test", "test", 0)):
        label = LabelKeyCache.get_or_create_by_name(organization, label_key_data["name"])

    assert label is None

    # test label does not exist in labels repo
    with patch.object(LabelsAPIClient, "get_label_by_key_name", side_effect=LabelsRepoAPIException("test", "test")):
        label = LabelKeyCache.get_or_create_by_name(organization, label_key_data["name"])

    assert label is None

    # test label does not exist in cache
    with patch.object(LabelsAPIClient, "get_label_by_key_name", return_value=({"key": label_key_data}, None)):
        label = LabelKeyCache.get_or_create_by_name(organization, label_key_data["name"])

    assert label is not None
    assert LabelKeyCache.objects.filter(id=label.id).exists()

    # test label exists in cache
    label = LabelKeyCache.get_or_create_by_name(organization, label_key_data["name"])
    assert label is not None
    assert LabelKeyCache.objects.filter(id=label.id).exists()

@@ -191,7 +191,10 @@ class EscalationPolicySerializer(EagerLoadingMixin, OrderedModelSerializer):
            EscalationPolicy.STEP_NOTIFY_TEAM_MEMBERS_IMPORTANT,
        ]:
            fields_to_remove.remove("team_to_notify")
        elif step == EscalationPolicy.STEP_NOTIFY_USERS_QUEUE:
        elif step in [
            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
        ]:
            fields_to_remove.remove("persons_to_notify_next_each_time")
        elif step in [EscalationPolicy.STEP_NOTIFY_GROUP, EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT]:
            fields_to_remove.remove("group_to_notify")

@@ -243,6 +246,7 @@ class EscalationPolicySerializer(EagerLoadingMixin, OrderedModelSerializer):
            validated_data_fields_to_remove.remove("wait_delay")
        elif step in [
            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
            EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
            EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
        ]:

@@ -298,6 +302,7 @@ class EscalationPolicyUpdateSerializer(EscalationPolicySerializer):
            instance.wait_delay = None
        if step not in [
            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
            EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
            EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
            EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
        ]:

@@ -123,6 +123,7 @@ class IntegrationSerializer(EagerLoadingMixin, serializers.ModelSerializer, Main
        connection_error = GrafanaAlertingSyncManager.check_for_connection_errors(organization)
        if connection_error:
            raise serializers.ValidationError(connection_error)
        validated_data = self._add_service_label_if_needed(organization, validated_data)
        user = self.context["request"].user
        with transaction.atomic():
            try:

@@ -140,6 +141,8 @@ class IntegrationSerializer(EagerLoadingMixin, serializers.ModelSerializer, Main
            )
            serializer.is_valid(raise_exception=True)
            serializer.save()
        # Create default service_name label
        instance.create_service_name_dynamic_label()
        return instance

    def update(self, *args, **kwargs):

@@ -29,5 +29,6 @@ EXPORT_WINDOW_DAYS_BEFORE = 15

SCHEDULE_ONCALL_CACHE_KEY_PREFIX = "schedule_oncall_users_"
SCHEDULE_ONCALL_CACHE_TTL = 15 * 60  # 15 minutes in seconds
SCHEDULE_CHECK_NEXT_DAYS = 30

PREFETCHED_SHIFT_SWAPS = "prefetched_shift_swaps"

@@ -33,6 +33,7 @@ from apps.schedules.constants import (
    ICAL_SUMMARY,
    ICAL_UID,
    PREFETCHED_SHIFT_SWAPS,
    SCHEDULE_CHECK_NEXT_DAYS,
)
from apps.schedules.ical_utils import (
    EmptyShifts,

@@ -293,9 +294,9 @@ class OnCallSchedule(PolymorphicModel):
            (self.prev_ical_file_overrides, self.cached_ical_file_overrides),
        ]

    def check_gaps_and_empty_shifts_for_next_week(self) -> None:
    def check_gaps_and_empty_shifts_for_next_days(self, days=SCHEDULE_CHECK_NEXT_DAYS) -> None:
        datetime_start = timezone.now()
        datetime_end = datetime_start + datetime.timedelta(days=7)
        datetime_end = datetime_start + datetime.timedelta(days=days)

        # get empty shifts from all events and gaps from final events
        events = self.filter_events(

@@ -313,14 +314,14 @@ class OnCallSchedule(PolymorphicModel):
        self.has_empty_shifts = has_empty_shifts
        self.save(update_fields=["has_gaps", "has_empty_shifts"])

    def get_gaps_for_next_week(self) -> ScheduleEvents:
    def get_gaps_for_next_days(self, days=SCHEDULE_CHECK_NEXT_DAYS) -> ScheduleEvents:
        today = timezone.now()
        events = self.final_events(today, today + datetime.timedelta(days=7))
        events = self.final_events(today, today + datetime.timedelta(days=days))
        return [event for event in events if event["is_gap"]]

    def get_empty_shifts_for_next_week(self) -> EmptyShifts:
    def get_empty_shifts_for_next_days(self, days=SCHEDULE_CHECK_NEXT_DAYS) -> EmptyShifts:
        today = timezone.now().date()
        return list_of_empty_shifts_in_schedule(self, today, today + datetime.timedelta(days=7))
        return list_of_empty_shifts_in_schedule(self, today, today + datetime.timedelta(days=days))

    def drop_cached_ical(self):
        self._drop_primary_ical_file()

@@ -19,5 +19,5 @@ def check_gaps_and_empty_shifts_in_schedule(schedule_pk):
        task_logger.info(f"Tried to check_gaps_and_empty_shifts_in_schedule for non-existing schedule {schedule_pk}")
        return

    schedule.check_gaps_and_empty_shifts_for_next_week()
    schedule.check_gaps_and_empty_shifts_for_next_days()
    task_logger.info(f"Finish check_gaps_and_empty_shifts_in_schedule {schedule_pk}")

@@ -48,15 +48,15 @@ def notify_about_empty_shifts_in_schedule_task(schedule_pk):
        task_logger.info(f"Tried to notify_about_empty_shifts_in_schedule_task for non-existing schedule {schedule_pk}")
        return

    empty_shifts = schedule.get_empty_shifts_for_next_week()
    empty_shifts = schedule.get_empty_shifts_for_next_days()
    schedule.empty_shifts_report_sent_at = timezone.now().date()

    if len(empty_shifts) != 0:
        schedule.has_empty_shifts = True
        text = (
            f'Tried to parse schedule *"{schedule.name}"* and found events without associated users.\n'
            f"To ensure you don't miss any notifications, use a Grafana username as the event name in the calendar. "
            f"The user should have Editor or Admin access.\n\n"
            f"Reviewing *{schedule.slack_url}* on-call schedule found events without valid associated users.\n"
            f"To ensure you don't miss any notifications, make sure the user exists (or you provided a valid Grafana username). "
            f"The user should have the right permissions, or be an Editor or Admin.\n\n"
        )
        for idx, empty_shift in enumerate(empty_shifts):
            start_timestamp = empty_shift.start.astimezone(pytz.UTC).timestamp()

@@ -80,7 +80,6 @@ def notify_about_empty_shifts_in_schedule_task(schedule_pk):
                text += '*All-day* event in "UTC" TZ\n'
            else:
                text += f"From {format_datetime_to_slack_with_time(start_timestamp)} to {format_datetime_to_slack_with_time(end_timestamp)} (your TZ)\n"
            text += f"_From {OnCallSchedule.CALENDAR_TYPE_VERBAL[empty_shift.calendar_type]} calendar_\n"
            if idx != len(empty_shifts) - 1:
                text += "\n\n"
        post_message_to_channel(schedule.organization, schedule.slack_channel_slack_id, text)

@@ -48,13 +48,13 @@ def notify_about_gaps_in_schedule_task(schedule_pk):
        task_logger.info(f"Tried to notify_about_gaps_in_schedule_task for non-existing schedule {schedule_pk}")
        return

    gaps = schedule.get_gaps_for_next_week()
    gaps = schedule.get_gaps_for_next_days()
    schedule.gaps_report_sent_at = timezone.now().date()

    if len(gaps) != 0:
        schedule.has_gaps = True
        text = f"There are time periods that are unassigned in *{schedule.name}* on-call schedule.\n"
        for idx, gap in enumerate(gaps):
        text = f"There are time periods that are unassigned in *{schedule.slack_url}* on-call schedule.\n"
        for gap in gaps:
            if gap["start"]:
                start_verbal = format_datetime_to_slack_with_time(gap["start"].astimezone(pytz.UTC).timestamp())
            else:

@@ -64,8 +64,7 @@ def notify_about_gaps_in_schedule_task(schedule_pk):
            else:
                end_verbal = "..."
            text += f"From {start_verbal} to {end_verbal} (your TZ)\n"
            if idx != len(gaps) - 1:
                text += "\n\n"
            text += "\n\n"
        post_message_to_channel(schedule.organization, schedule.slack_channel_slack_id, text)
    else:
        schedule.has_gaps = False

@@ -4,6 +4,7 @@ import pytest
from django.utils import timezone

from apps.api.permissions import LegacyAccessControlRole
from apps.schedules.constants import SCHEDULE_CHECK_NEXT_DAYS
from apps.schedules.models import CustomOnCallShift, OnCallScheduleWeb

@@ -34,6 +35,7 @@ def test_no_empty_shifts_no_gaps(
    )
    on_call_shift.add_rolling_users([[user1]])
    schedule.refresh_ical_file()
    schedule.check_gaps_and_empty_shifts_for_next_week()
    schedule.check_gaps_and_empty_shifts_for_next_days()
    schedule.refresh_from_db()

    assert schedule.has_gaps is False

@@ -73,7 +74,7 @@ def test_no_empty_shifts_but_gaps_now(
    assert schedule.has_gaps is False
    assert schedule.has_empty_shifts is False

    schedule.check_gaps_and_empty_shifts_for_next_week()
    schedule.check_gaps_and_empty_shifts_for_next_days()
    schedule.refresh_from_db()

    assert schedule.has_gaps is True

@@ -111,7 +112,7 @@ def test_empty_shifts_no_gaps(
    assert schedule.has_gaps is False
    assert schedule.has_empty_shifts is False

    schedule.check_gaps_and_empty_shifts_for_next_week()
    schedule.check_gaps_and_empty_shifts_for_next_days()
    schedule.refresh_from_db()

    assert schedule.has_gaps is False

@@ -150,7 +151,7 @@ def test_empty_shifts_and_gaps(
    assert schedule.has_gaps is False
    assert schedule.has_empty_shifts is False

    schedule.check_gaps_and_empty_shifts_for_next_week()
    schedule.check_gaps_and_empty_shifts_for_next_days()
    schedule.refresh_from_db()

    assert schedule.has_gaps is True

@@ -206,7 +207,7 @@ def test_empty_shifts_and_gaps_in_the_past(
    assert schedule.has_gaps is False
    assert schedule.has_empty_shifts is False

    schedule.check_gaps_and_empty_shifts_for_next_week()
    schedule.check_gaps_and_empty_shifts_for_next_days()
    schedule.refresh_from_db()

    assert schedule.has_gaps is False

@@ -225,9 +226,9 @@ def test_empty_shifts_and_gaps_in_the_future(
    user2 = make_user(organization=organization, username="user2", role=LegacyAccessControlRole.ADMIN)

    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb, name="test_schedule")
    # empty shift with gaps starts in 7 days 1 min
    # empty shift with gaps starts in SCHEDULE_CHECK_NEXT_DAYS days 1 min
    now = timezone.now().replace(microsecond=0)
    start_date = now + datetime.timedelta(days=7, minutes=1)
    start_date = now + datetime.timedelta(days=SCHEDULE_CHECK_NEXT_DAYS, minutes=1)
    data = {
        "start": start_date,
        "rotation_start": start_date,

@@ -241,9 +242,9 @@ def test_empty_shifts_and_gaps_in_the_future(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
    )
    on_call_shift.add_rolling_users([[user1]])
    # normal shift ends in 7 days 1 min
    start_date2 = now - datetime.timedelta(days=7, minutes=1)
    until = now + datetime.timedelta(days=7, minutes=1)
    # normal shift ends in SCHEDULE_CHECK_NEXT_DAYS days 1 min
    start_date2 = now - datetime.timedelta(days=SCHEDULE_CHECK_NEXT_DAYS, minutes=1)
    until = now + datetime.timedelta(days=SCHEDULE_CHECK_NEXT_DAYS, minutes=1)
    data2 = {
        "start": start_date2,
        "rotation_start": start_date2,

@@ -262,8 +263,8 @@ def test_empty_shifts_and_gaps_in_the_future(
    assert schedule.has_gaps is False
    assert schedule.has_empty_shifts is False

    schedule.check_gaps_and_empty_shifts_for_next_week()
    schedule.check_gaps_and_empty_shifts_for_next_days()
    schedule.refresh_from_db()
    # no gaps and empty shifts in the next 7 days
    # no gaps and empty shifts in the next SCHEDULE_CHECK_NEXT_DAYS days
    assert schedule.has_gaps is False
    assert schedule.has_empty_shifts is False

@@ -4,6 +4,7 @@ from unittest.mock import patch
import pytest
from django.utils import timezone

from apps.schedules.constants import SCHEDULE_CHECK_NEXT_DAYS
from apps.schedules.models import CustomOnCallShift, OnCallScheduleCalendar, OnCallScheduleICal, OnCallScheduleWeb
from apps.schedules.tasks import notify_about_gaps_in_schedule_task, start_notify_about_gaps_in_schedule

@@ -236,7 +237,7 @@ def test_gaps_near_future_trigger_notification(


@pytest.mark.django_db
def test_gaps_later_than_7_days_no_triggering_notification(
def test_gaps_later_than_days_no_triggering_notification(
    make_slack_team_identity,
    make_slack_channel,
    make_organization,

@@ -259,8 +260,8 @@ def test_gaps_later_than_7_days_no_triggering_notification(
        prev_ical_file_overrides=None,
        cached_ical_file_overrides=None,
    )
    start_date = now - datetime.timedelta(days=7, minutes=1)
    until_date = now + datetime.timedelta(days=8)
    start_date = now - datetime.timedelta(days=SCHEDULE_CHECK_NEXT_DAYS, minutes=1)
    until_date = now + datetime.timedelta(days=SCHEDULE_CHECK_NEXT_DAYS + 1)
    data = {
        "start": start_date,
        "rotation_start": start_date,

@@ -558,7 +558,7 @@ class ResolutionNoteModalStep(AlertGroupActionsMixin, scenario_step.ScenarioStep
            user_verbal = resolution_note.author_verbal(mention=True)
            message_timestamp = datetime.datetime.timestamp(resolution_note.created_at)
            blocks.append(DIVIDER)
            source = "web" if resolution_note.source == ResolutionNote.Source.WEB else "slack"
            source = "web" if resolution_note.source == ResolutionNote.Source.WEB else "Slack"

            blocks.append(
                typing.cast(

@@ -26,8 +26,8 @@ if typing.TYPE_CHECKING:
class EditScheduleShiftNotifyStep(scenario_step.ScenarioStep):
    notify_empty_oncall_options = {choice[0]: choice[1] for choice in OnCallSchedule.NotifyEmptyOnCall.choices}
    notify_oncall_shift_freq_options = {choice[0]: choice[1] for choice in OnCallSchedule.NotifyOnCallShiftFreq.choices}
    mention_oncall_start_options = {1: "Mention person in slack", 0: "Inform in channel without mention"}
    mention_oncall_next_options = {1: "Mention person in slack", 0: "Inform in channel without mention"}
    mention_oncall_start_options = {1: "Mention person in Slack", 0: "Inform in channel without mention"}
    mention_oncall_next_options = {1: "Mention person in Slack", 0: "Inform in channel without mention"}

    def process_scenario(
        self,

@@ -586,7 +586,7 @@ class ResetSlackView(APIView):
        # just a placeholder value to continue uninstallation while UNIFIED_SLACK_APP_ENABLED is not enabled
        removed = True
        if not removed:
            return Response({"error": "Failed to uninstall slack integration"}, status=500)
            return Response({"error": "Failed to uninstall Slack integration"}, status=500)

        try:
            uninstall_slack_integration(request.user.organization, request.user)

@@ -132,13 +132,17 @@ def _extract_users_from_escalation_snapshot(escalation_snapshot):
    if escalation_snapshot:
        for policy_snapshot in escalation_snapshot.escalation_policies_snapshots:
            if policy_snapshot.step in [
                EscalationPolicy.STEP_NOTIFY,
                EscalationPolicy.STEP_NOTIFY_IMPORTANT,
                EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
                EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
            ]:
                for user in policy_snapshot.notify_to_users_queue:
                    users.append(_serialize_event_user(user))
            elif policy_snapshot.step in [
                EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
                EscalationPolicy.STEP_NOTIFY_USERS_QUEUE_IMPORTANT,
            ]:
                if policy_snapshot.notify_to_users_queue:
                    users.append(_serialize_event_user(policy_snapshot.next_user_in_sorted_queue))
            elif policy_snapshot.step in [
                EscalationPolicy.STEP_NOTIFY_SCHEDULE,
                EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT,

@@ -5,3 +5,4 @@ class PluginID:
    INCIDENT = "grafana-incident-app"
    LABELS = "grafana-labels-app"
    ML = "grafana-ml-app"
    SLO = "grafana-slo-app"

@@ -18,7 +18,7 @@ charset-normalizer==3.3.2
    #   requests
distlib==0.3.8
    # via virtualenv
django==4.2.17
django==4.2.18
    # via
    #   -c requirements.txt
    #   django-stubs

@@ -1,8 +1,8 @@
babel==2.12.1
beautifulsoup4==4.12.2
celery[redis]==5.3.1
celery[redis]==5.3.6
cryptography==43.0.1
django==4.2.17
django==4.2.18
django-add-default-value==0.10.0
django-anymail[amazon-ses]==12.0
django-cors-headers==3.7.0

@@ -32,6 +32,7 @@ fcm-django @ https://github.com/grafana/fcm-django/archive/refs/tags/v1.0.12r1.t
hiredis==2.2.3
humanize==4.10.0
icalendar==5.0.10
jinja2==3.1.5
lxml==5.2.2
markdown2==2.4.10
opentelemetry-sdk==1.26.0

@@ -34,7 +34,7 @@ cachetools==4.2.2
    # via
    #   google-auth
    #   python-telegram-bot
celery[redis]==5.3.1
celery==5.3.6
    # via -r requirements.in
certifi==2024.7.4
    # via

@@ -75,7 +75,7 @@ deprecated==1.2.14
    #   opentelemetry-api
    #   opentelemetry-exporter-otlp-proto-grpc
    #   opentelemetry-semantic-conventions
django==4.2.17
django==4.2.18
    # via
    #   -r requirements.in
    #   django-add-default-value

@@ -97,7 +97,7 @@ django==4.2.17
    #   social-auth-app-django
django-add-default-value==0.10.0
    # via -r requirements.in
django-anymail[amazon-ses]==12.0
django-anymail==12.0
    # via -r requirements.in
django-cors-headers==3.7.0
    # via -r requirements.in

@@ -152,7 +152,7 @@ firebase-admin==5.4.0
    # via fcm-django
flask==3.0.2
    # via slack-export-viewer
google-api-core[grpc]==2.17.0
google-api-core==2.17.0
    # via
    #   firebase-admin
    #   google-api-python-client

@@ -229,8 +229,10 @@ inflection==0.5.1
    # via drf-spectacular
itsdangerous==2.1.2
    # via flask
jinja2==3.1.4
    # via flask
jinja2==3.1.5
    # via
    #   -r requirements.in
    #   flask
jmespath==1.0.1
    # via
    #   boto3

@@ -76,6 +76,7 @@ FEATURE_ALERT_GROUP_SEARCH_ENABLED = getenv_boolean("FEATURE_ALERT_GROUP_SEARCH_
FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS = getenv_integer("FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS", default=None)
FEATURE_NOTIFICATION_BUNDLE_ENABLED = getenv_boolean("FEATURE_NOTIFICATION_BUNDLE_ENABLED", default=True)
FEATURE_DECLARE_INCIDENT_STEP_ENABLED = getenv_boolean("FEATURE_DECLARE_INCIDENT_STEP_ENABLED", default=False)
FEATURE_SERVICE_DEPENDENCIES_ENABLED = getenv_boolean("FEATURE_SERVICE_DEPENDENCIES_ENABLED", default=False)

TWILIO_API_KEY_SID = os.environ.get("TWILIO_API_KEY_SID")
TWILIO_API_KEY_SECRET = os.environ.get("TWILIO_API_KEY_SECRET")

@@ -17,6 +17,9 @@ CELERY_TASK_ROUTES = {
    "apps.labels.tasks.update_instances_labels_cache": {"queue": "default"},
    "apps.labels.tasks.update_label_option_cache": {"queue": "default"},
    "apps.labels.tasks.update_label_pairs_cache": {"queue": "default"},
    "apps.labels.tasks.add_service_label_for_alerting_integrations": {"queue": "default"},
    "apps.labels.tasks.add_service_label_per_org": {"queue": "default"},
    "apps.labels.tasks.add_service_label_for_integration": {"queue": "default"},
    "apps.metrics_exporter.tasks.start_calculate_and_cache_metrics": {"queue": "default"},
    "apps.metrics_exporter.tasks.update_metrics_for_alert_group": {"queue": "default"},
    "apps.metrics_exporter.tasks.update_metrics_for_user": {"queue": "default"},