2024-07-23 17:12:00 +01:00
from datetime import timedelta
2022-06-03 08:09:47 -06:00
2024-06-04 14:51:02 +03:00
from django . conf import settings
2023-01-22 00:14:48 +08:00
from django . core . exceptions import ObjectDoesNotExist
2024-10-03 01:09:50 +08:00
from django . db . models import Q
2024-07-23 17:12:00 +01:00
from django . utils import timezone
2022-06-03 08:09:47 -06:00
from django_filters import rest_framework as filters
2024-01-12 15:11:22 +00:00
from drf_spectacular . utils import extend_schema , inline_serializer
2023-08-16 14:13:56 +08:00
from rest_framework import mixins , serializers , status , viewsets
2022-06-03 08:09:47 -06:00
from rest_framework . decorators import action
2023-01-22 00:14:48 +08:00
from rest_framework . exceptions import NotFound
2022-06-03 08:09:47 -06:00
from rest_framework . filters import SearchFilter
from rest_framework . permissions import IsAuthenticated
from rest_framework . response import Response
from apps . alerts . constants import ActionSource
2024-10-24 11:24:36 +02:00
from apps . alerts . models import AlertGroup , AlertReceiveChannel , ResolutionNote
2023-01-17 12:19:08 -03:00
from apps . alerts . paging import unpage_user
2023-11-06 15:30:32 -05:00
from apps . alerts . tasks import delete_alert_group , send_update_resolution_note_signal
Reworked declare incident escalation step (#5130)
Reworked https://github.com/grafana/oncall/pull/5047. Main update is the
switch from FK to a [M2M
relation](https://docs.google.com/document/d/1HeulqxoFShSHtInQrZNJLL5MDlHPNT50rVGaK3zZWvw/edit?disco=AAABVLjV4W8)
(which doesn't really change the original/intended behavior, besides not
needing to alter the alert group table, and it is a bit more flexible;
the extra table shouldn't introduce issues because this is used only for
tracking purposes and the information needed in the log record is
already there).
Avoid a db migration involving alert group table:
```
--
-- Create model RelatedIncident
--
CREATE TABLE `alerts_relatedincident` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, `incident_id` varchar(50) NOT NULL, `created_at` datetime(6) NOT NULL, `is_active` bool NOT NULL, `channel_filter_id` bigint NULL, `organization_id` bigint NOT NULL);
CREATE TABLE `alerts_relatedincident_attached_alert_groups` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, `relatedincident_id` bigint NOT NULL, `alertgroup_id` bigint NOT NULL);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincident_organization_id_incident_id_d7fc9a4f_uniq` UNIQUE (`organization_id`, `incident_id`);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincide_channel_filter_id_9556c836_fk_alerts_ch` FOREIGN KEY (`channel_filter_id`) REFERENCES `alerts_channelfilter` (`id`);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincide_organization_id_74ed6bed_fk_user_mana` FOREIGN KEY (`organization_id`) REFERENCES `user_management_organization` (`id`);
CREATE INDEX `alerts_relatedincident_incident_id_8356a799` ON `alerts_relatedincident` (`incident_id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincident_a_relatedincident_id_alert_3d683baa_uniq` UNIQUE (`relatedincident_id`, `alertgroup_id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincide_relatedincident_id_3e5e7a23_fk_alerts_re` FOREIGN KEY (`relatedincident_id`) REFERENCES `alerts_relatedincident` (`id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincide_alertgroup_id_0125deca_fk_alerts_al` FOREIGN KEY (`alertgroup_id`) REFERENCES `alerts_alertgroup` (`id`);
```
2024-10-07 16:26:10 -03:00
from apps . alerts . utils import is_declare_incident_step_enabled
2023-06-07 20:19:16 +08:00
from apps . api . errors import AlertGroupAPIError
Webhook labels (#3383)
This PR add labels for webhooks.
1. Make webhook "labelable" with ability to filter by labels.
2. Add labels to the webhook payload. It contain new field webhook with
it's name, id and labels. Field integration and alert_group has a
corresponding label field as well. See example of a new payload below:
```
{
"event": {
"type": "escalation"
},
"user": null,
"alert_group": {
"id": "IRFN6ZD31N31B",
"integration_id": "CTWM7U4A2QG97",
"route_id": "RUE7U7Z46SKGY",
"alerts_count": 1,
"state": "firing",
"created_at": "2023-11-22T08:54:55.178243Z",
"resolved_at": null,
"acknowledged_at": null,
"title": "Incident",
"permalinks": {
"slack": null,
"telegram": null,
"web": "http://grafana:3000/a/grafana-oncall-app/alert-groups/IRFN6ZD31N31B"
},
"labels": {
"severity": "critical"
}
},
"alert_group_id": "IRFN6ZD31N31B",
"alert_payload": {
"message": "This alert was sent by user for demonstration purposes"
},
"integration": {
"id": "CTWM7U4A2QG97",
"type": "webhook",
"name": "hi - Webhook",
"team": null,
"labels": {
"hello": "world",
"severity": "critical"
}
},
"notified_users": [],
"users_to_be_notified": [],
"webhook": {
"id": "WHAXK4BTC7TAEQ",
"name": "test",
"labels": {
"hello": "kesha"
}
}
}
```
I feel that there is an opportunity to make code cleaner - remove all
label logic from serializers, views and utils to models or dedicated
LabelerService and introduce Labelable interface with something like
label_verbal, update_labels methods. However, I don't want to tie
webhook labels with a refactoring.
---------
Co-authored-by: Dominik <dominik.broj@grafana.com>
2023-11-22 19:17:41 +08:00
from apps . api . label_filtering import parse_label_query
2022-11-29 09:41:56 +01:00
from apps . api . permissions import RBACPermission
2022-07-27 12:14:59 +01:00
from apps . api . serializers . alert_group import AlertGroupListSerializer , AlertGroupSerializer
2024-01-10 15:52:59 +01:00
from apps . api . serializers . alert_group_escalation_snapshot import AlertGroupEscalationSnapshotAPISerializer
2023-01-22 00:14:48 +08:00
from apps . api . serializers . team import TeamSerializer
2022-11-23 15:56:43 +00:00
from apps . auth_token . auth import PluginAuthentication
2023-04-27 11:20:45 +08:00
from apps . base . models . user_notification_policy_log_record import UserNotificationPolicyLogRecord
2025-01-16 09:19:32 -03:00
from apps . grafana_plugin . ui_url_builder import UIURLBuilder
2023-11-06 10:31:12 +00:00
from apps . labels . utils import is_labels_feature_enabled
2022-11-23 15:56:43 +00:00
from apps . mobile_app . auth import MobileAppAuthTokenAuthentication
2023-01-22 00:14:48 +08:00
from apps . user_management . models import Team , User
2022-06-03 08:09:47 -06:00
from common . api_helpers . exceptions import BadRequest
2024-01-12 15:11:22 +00:00
from common . api_helpers . filters import (
NO_TEAM_VALUE ,
DateRangeFilterMixin ,
ModelFieldFilterMixin ,
MultipleChoiceCharFilter ,
2024-10-24 11:24:36 +02:00
get_escalation_chain_queryset ,
get_integration_queryset ,
get_user_queryset ,
2024-01-12 15:11:22 +00:00
)
2024-10-03 01:09:50 +08:00
from common . api_helpers . mixins import (
AlertGroupEnrichingMixin ,
PreviewTemplateMixin ,
PublicPrimaryKeyMixin ,
TeamFilteringMixin ,
)
2023-12-06 13:10:56 +00:00
from common . api_helpers . paginators import AlertGroupCursorPaginator
2022-06-03 08:09:47 -06:00
2023-11-23 14:28:00 -03:00
class AlertGroupFilter(DateRangeFilterMixin, ModelFieldFilterMixin, filters.FilterSet):
    """
    Filterset for the internal alert group endpoints.

    Examples of possible date formats here https://docs.djangoproject.com/en/1.9/ref/settings/#datetime-input-formats
    """

    # Cap for filter_by_involved_users: only the most recent N alert groups with
    # notification log records for the given user(s) are considered, because an
    # unbounded lookup is too expensive.
    FILTER_BY_INVOLVED_USERS_ALERT_GROUPS_CUTOFF = 1000

    is_root = filters.BooleanFilter(field_name="root_alert_group", lookup_expr="isnull")
    status = filters.MultipleChoiceFilter(choices=AlertGroup.STATUS_CHOICES, method="filter_status")
    started_at = filters.CharFilter(
        field_name="started_at",
        method=DateRangeFilterMixin.filter_date_range.__name__,
    )
    resolved_at = filters.CharFilter(
        field_name="resolved_at",
        method=DateRangeFilterMixin.filter_date_range.__name__,
    )
    integration = MultipleChoiceCharFilter(
        field_name="channel",
        queryset=get_integration_queryset,
        to_field_name="public_primary_key",
        method=ModelFieldFilterMixin.filter_model_field.__name__,
    )
    escalation_chain = MultipleChoiceCharFilter(
        field_name="channel_filter__escalation_chain",
        queryset=get_escalation_chain_queryset,
        to_field_name="public_primary_key",
        method=ModelFieldFilterMixin.filter_model_field.__name__,
    )
    resolved_by = MultipleChoiceCharFilter(
        field_name="resolved_by_user",
        queryset=get_user_queryset,
        to_field_name="public_primary_key",
        method=ModelFieldFilterMixin.filter_model_field.__name__,
    )
    acknowledged_by = MultipleChoiceCharFilter(
        field_name="acknowledged_by_user",
        queryset=get_user_queryset,
        to_field_name="public_primary_key",
        method=ModelFieldFilterMixin.filter_model_field.__name__,
    )
    silenced_by = MultipleChoiceCharFilter(
        field_name="silenced_by_user",
        queryset=get_user_queryset,
        to_field_name="public_primary_key",
        method=ModelFieldFilterMixin.filter_model_field.__name__,
    )
    invitees_are = MultipleChoiceCharFilter(
        queryset=get_user_queryset, to_field_name="public_primary_key", method="filter_invitees_are"
    )
    involved_users_are = MultipleChoiceCharFilter(
        queryset=get_user_queryset, to_field_name="public_primary_key", method="filter_by_involved_users"
    )
    with_resolution_note = filters.BooleanFilter(method="filter_with_resolution_note")
    mine = filters.BooleanFilter(method="filter_mine")
    has_related_incident = filters.BooleanFilter(field_name="related_incidents", lookup_expr="isnull", exclude=True)

    def filter_status(self, queryset, name, value):
        """Filter alert groups by state; multiple requested statuses are OR-ed together."""
        if not value:
            return queryset
        try:
            statuses = list(map(int, value))
        except ValueError:
            raise BadRequest(detail="Invalid status value")

        # NOTE: renamed local from `filters` to `state_filters` — the original name
        # shadowed the `django_filters as filters` module import.
        state_filters = {}
        if AlertGroup.NEW in statuses:
            state_filters["new"] = AlertGroup.get_new_state_filter()
        if AlertGroup.SILENCED in statuses:
            state_filters["silenced"] = AlertGroup.get_silenced_state_filter()
        if AlertGroup.ACKNOWLEDGED in statuses:
            state_filters["acknowledged"] = AlertGroup.get_acknowledged_state_filter()
        if AlertGroup.RESOLVED in statuses:
            state_filters["resolved"] = AlertGroup.get_resolved_state_filter()

        q_objects = Q()
        for state_q in state_filters.values():
            q_objects |= state_q
        return queryset.filter(q_objects)

    def filter_invitees_are(self, queryset, name, value):
        """Filter alert groups that have log records authored by any of the given users."""
        users = value
        if not users:
            return queryset
        return queryset.filter(log_records__author__in=users).distinct()

    def filter_by_involved_users(self, queryset, name, value):
        """Filter alert groups where any of the given users was notified or interacted."""
        users = value
        if not users:
            return queryset

        # This is expensive to filter all alert groups with involved users,
        # so we limit the number of alert groups to filter by the last 1000 for the given user(s)
        alert_group_notified_users_ids = list(
            UserNotificationPolicyLogRecord.objects.filter(author__in=users)
            .order_by("-alert_group_id")
            .values_list("alert_group_id", flat=True)
            .distinct()[: self.FILTER_BY_INVOLVED_USERS_ALERT_GROUPS_CUTOFF]
        )
        return queryset.filter(
            # user was notified
            Q(id__in=alert_group_notified_users_ids)
            |
            # or interacted with the alert group
            Q(acknowledged_by_user__in=users)
            | Q(resolved_by_user__in=users)
            | Q(silenced_by_user__in=users)
        ).distinct()

    def filter_mine(self, queryset, name, value):
        """Shortcut for involved_users_are=<current user>."""
        if value:
            return self.filter_by_involved_users(queryset, "users", [self.request.user])
        return queryset

    def filter_with_resolution_note(self, queryset, name, value):
        """Filter by presence (True) or absence (False) of non-deleted resolution notes."""
        if value is True:
            queryset = queryset.filter(Q(resolution_notes__isnull=False, resolution_notes__deleted_at=None)).distinct()
        elif value is False:
            queryset = queryset.filter(
                Q(resolution_notes__isnull=True) | ~Q(resolution_notes__deleted_at=None)
            ).distinct()
        return queryset
2022-08-17 08:46:53 +01:00
class AlertGroupTeamFilteringMixin(TeamFilteringMixin):
    """
    Team-aware retrieve behaviour for alert group views.

    When the base retrieve() raises NotFound, the alert group may still exist but
    belong to a team whose objects are filtered out for the requesting user. In
    that case respond with 403 and error_code "wrong_team", including the owning
    team's details when the user is allowed to see that team.
    """

    TEAM_LOOKUP = "team"

    def retrieve(self, request, *args, **kwargs):
        try:
            return super().retrieve(request, *args, **kwargs)
        except NotFound:
            pass

        # Re-run the lookup without team filtering. Two separate queries
        # (channel ids first, then alert groups) are used instead of a join
        # for performance reasons.
        channel_ids = list(
            AlertReceiveChannel.objects.filter(
                organization_id=self.request.auth.organization.id,
            ).values_list("id", flat=True)
        )
        candidates = AlertGroup.objects.filter(
            channel__in=channel_ids,
        ).only("public_primary_key")
        try:
            alert_group = candidates.get(public_primary_key=self.kwargs["pk"])
        except ObjectDoesNotExist:
            raise NotFound

        owner_team = self._getattr_with_related(alert_group, self.TEAM_LOOKUP)
        if owner_team is not None and owner_team not in self.request.user.teams.all():
            # The user is not a member of the owning team: do not leak its details.
            return Response(data={"error_code": "wrong_team"}, status=status.HTTP_403_FORBIDDEN)

        if owner_team is None:
            # No team means the implicit "General" team.
            owner_team = Team(public_primary_key=None, name="General", email=None, avatar_url=None)
        return Response(
            data={"error_code": "wrong_team", "owner_team": TeamSerializer(owner_team).data},
            status=status.HTTP_403_FORBIDDEN,
        )
2022-08-17 08:46:53 +01:00
2024-07-23 17:12:00 +01:00
class AlertGroupSearchFilter(SearchFilter):
    """DRF search filter for alert groups, gated behind feature-flag settings."""

    def filter_queryset(self, request, queryset, view):
        fields = self.get_search_fields(view, request)
        terms = self.get_search_terms(request)
        if not (fields and terms):
            return queryset

        cutoff_days = settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS
        if cutoff_days:
            # Restrict the searched window: anchor at the end of the requested
            # started_at range, or "now" when no range was requested.
            started_at = request.query_params.get("started_at")
            if started_at:
                end = DateRangeFilterMixin.parse_custom_datetime_range(started_at)[1]
            else:
                end = timezone.now()
            queryset = queryset.filter(started_at__gte=end - timedelta(days=cutoff_days))

        return super().filter_queryset(request, queryset, view)

    def get_search_fields(self, view, request):
        if not settings.FEATURE_ALERT_GROUP_SEARCH_ENABLED:
            return []
        return ["=public_primary_key", "=inside_organization_number", "web_title_cache"]
2022-06-03 08:09:47 -06:00
class AlertGroupView(
    AlertGroupEnrichingMixin,
    PreviewTemplateMixin,
    AlertGroupTeamFilteringMixin,
    PublicPrimaryKeyMixin[AlertGroup],
    mixins.RetrieveModelMixin,
    mixins.ListModelMixin,
    mixins.DestroyModelMixin,
    viewsets.GenericViewSet,
):
    """
    Internal API endpoints for alert groups.
    """

    authentication_classes = (
        MobileAppAuthTokenAuthentication,
        PluginAuthentication,
    )
    permission_classes = (IsAuthenticated, RBACPermission)

    # Per-action RBAC requirements: READ for read-only endpoints, WRITE for
    # state-changing ones, INTEGRATIONS_TEST for template previews.
    rbac_permissions = {
        "metadata": [RBACPermission.Permissions.ALERT_GROUPS_READ],
        "list": [RBACPermission.Permissions.ALERT_GROUPS_READ],
        "retrieve": [RBACPermission.Permissions.ALERT_GROUPS_READ],
        "stats": [RBACPermission.Permissions.ALERT_GROUPS_READ],
        "filters": [RBACPermission.Permissions.ALERT_GROUPS_READ],
        "silence_options": [RBACPermission.Permissions.ALERT_GROUPS_READ],
        "bulk_action_options": [RBACPermission.Permissions.ALERT_GROUPS_READ],
        "destroy": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "acknowledge": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "unacknowledge": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "resolve": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "unresolve": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "attach": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "unattach": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "silence": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "unsilence": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "unpage_user": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "bulk_action": [RBACPermission.Permissions.ALERT_GROUPS_WRITE],
        "preview_template": [RBACPermission.Permissions.INTEGRATIONS_TEST],
        "escalation_snapshot": [RBACPermission.Permissions.ALERT_GROUPS_READ],
        "filter_affected_services": [RBACPermission.Permissions.ALERT_GROUPS_READ],
    }

    queryset = AlertGroup.objects.none()  # needed for drf-spectacular introspection
    serializer_class = AlertGroupSerializer
    pagination_class = AlertGroupCursorPaginator

    filter_backends = [AlertGroupSearchFilter, filters.DjangoFilterBackend]
    filterset_class = AlertGroupFilter
2022-07-27 12:14:59 +01:00
def get_serializer_class ( self ) :
if self . action == " list " :
return AlertGroupListSerializer
return super ( ) . get_serializer_class ( )
2025-01-16 09:19:32 -03:00
def _get_queryset (
self ,
action = None ,
ignore_filtering_by_available_teams = False ,
team_values = None ,
started_at = None ,
label_query = None ,
) :
# make base get_queryset reusable via params
if action is None :
# assume stats by default
action = " stats "
2023-11-02 16:45:30 +08:00
alert_receive_channels_qs = AlertReceiveChannel . objects_with_deleted . filter (
2023-04-20 21:01:41 +08:00
organization_id = self . request . auth . organization . id
Optimize alertgroups endpoint (#1189)
# What this PR does
Changing query to retrieve alert group in two completely different
queries instead of one with `join`
new queries
```
SELECT alerts_alertreceivechannel.id
FROM alerts_alertreceivechannel
WHERE (alerts_alertreceivechannel.deleted_at IS NULL
AND alerts_alertreceivechannel.organization_id = 8
AND alerts_alertreceivechannel.team_id IS NULL)
SELECT `alerts_alertgroup`.`id`
FROM `alerts_alertgroup`
WHERE (`alerts_alertgroup`.`channel_id` IN (2,33,34,35,36,40,52,59,61,62,63,70,76,89,93,94,03,08,09,10,12,13,16,18,20,22,23,24,26,27,28,30,31,33,34,35,36,40,41,42,43,45,48,53,56,57,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,86,87,88,89,91,93,23,27,29,31,32,33,55,56,57,58,65,69,72,75,81,13,17,20,22,33,34,38,39,41,44,45,46,51,52,55,56,58,59,60,63,68,70,71)
AND NOT `alerts_alertgroup`.`is_archived`
AND NOT `alerts_alertgroup`.`is_archived`
AND `alerts_alertgroup`.`root_alert_group_id` IS NULL
AND ((NOT `alerts_alertgroup`.`silenced`
AND NOT `alerts_alertgroup`.`acknowledged`
AND NOT `alerts_alertgroup`.`resolved`)
OR (`alerts_alertgroup`.`acknowledged`
AND NOT `alerts_alertgroup`.`resolved`))
AND NOT `alerts_alertgroup`.`is_archived`)
ORDER BY `alerts_alertgroup`.`id` DESC
LIMIT 26
```
## Which issue(s) this PR fixes
## Checklist
- [ ] Tests updated
- [ ] Documentation added
- [ ] `CHANGELOG.md` updated
2023-01-22 00:53:11 +08:00
)
2023-04-20 21:01:41 +08:00
if not ignore_filtering_by_available_teams :
alert_receive_channels_qs = alert_receive_channels_qs . filter ( * self . available_teams_lookup_args )
2023-11-23 14:28:00 -03:00
# Filter by team(s). Since we really filter teams from integrations, this is not an AlertGroup model filter.
# This is based on the common.api_helpers.ByTeamModelFieldFilterMixin implementation
if team_values :
null_team_lookup = Q ( team__isnull = True ) if NO_TEAM_VALUE in team_values else None
teams_lookup = Q ( team__public_primary_key__in = [ ppk for ppk in team_values if ppk != NO_TEAM_VALUE ] )
if null_team_lookup :
teams_lookup = teams_lookup | null_team_lookup
alert_receive_channels_qs = alert_receive_channels_qs . filter ( teams_lookup )
2023-04-20 21:01:41 +08:00
alert_receive_channels_ids = list ( alert_receive_channels_qs . values_list ( " id " , flat = True ) )
2023-11-06 10:31:12 +00:00
queryset = AlertGroup . objects . filter ( channel__in = alert_receive_channels_ids )
2023-03-22 00:57:20 +08:00
2025-01-16 09:19:32 -03:00
if action in ( " list " , " stats " ) and not started_at :
2024-09-24 19:13:51 +02:00
queryset = queryset . filter ( started_at__gte = timezone . now ( ) - timezone . timedelta ( days = 30 ) )
2025-01-16 09:19:32 -03:00
if action in ( " list " , " stats " ) and settings . ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX :
2024-07-23 17:12:00 +01:00
# workaround related to MySQL "ORDER BY LIMIT Query Optimizer Bug"
# read more: https://hackmysql.com/infamous-order-by-limit-query-optimizer-bug/
2024-07-25 06:21:32 -03:00
from django_mysql . models import add_QuerySetMixin
queryset = add_QuerySetMixin ( queryset )
queryset = queryset . force_index ( " alert_group_list_index " )
2024-07-23 17:12:00 +01:00
Webhook labels (#3383)
This PR add labels for webhooks.
1. Make webhook "labelable" with ability to filter by labels.
2. Add labels to the webhook payload. It contain new field webhook with
it's name, id and labels. Field integration and alert_group has a
corresponding label field as well. See example of a new payload below:
```
{
"event": {
"type": "escalation"
},
"user": null,
"alert_group": {
"id": "IRFN6ZD31N31B",
"integration_id": "CTWM7U4A2QG97",
"route_id": "RUE7U7Z46SKGY",
"alerts_count": 1,
"state": "firing",
"created_at": "2023-11-22T08:54:55.178243Z",
"resolved_at": null,
"acknowledged_at": null,
"title": "Incident",
"permalinks": {
"slack": null,
"telegram": null,
"web": "http://grafana:3000/a/grafana-oncall-app/alert-groups/IRFN6ZD31N31B"
},
"labels": {
"severity": "critical"
}
},
"alert_group_id": "IRFN6ZD31N31B",
"alert_payload": {
"message": "This alert was sent by user for demonstration purposes"
},
"integration": {
"id": "CTWM7U4A2QG97",
"type": "webhook",
"name": "hi - Webhook",
"team": null,
"labels": {
"hello": "world",
"severity": "critical"
}
},
"notified_users": [],
"users_to_be_notified": [],
"webhook": {
"id": "WHAXK4BTC7TAEQ",
"name": "test",
"labels": {
"hello": "kesha"
}
}
}
```
I feel that there is an opportunity to make code cleaner - remove all
label logic from serializers, views and utils to models or dedicated
LabelerService and introduce Labelable interface with something like
label_verbal, update_labels methods. However, I don't want to tie
webhook labels with a refactoring.
---------
Co-authored-by: Dominik <dominik.broj@grafana.com>
2023-11-22 19:17:41 +08:00
# Filter by labels. Since alert group labels are "static" filter by names, not IDs.
2025-01-16 09:19:32 -03:00
if label_query :
kv_pairs = parse_label_query ( label_query )
for key , value in kv_pairs :
# Utilize (organization, key_name, value_name, alert_group) index on AlertGroupAssociatedLabel
queryset = queryset . filter (
labels__organization = self . request . auth . organization ,
labels__key_name = key ,
labels__value_name = value ,
)
2023-03-22 00:57:20 +08:00
2022-07-27 12:14:59 +01:00
return queryset
2025-01-16 09:19:32 -03:00
def get_queryset ( self , ignore_filtering_by_available_teams = False ) :
# no select_related or prefetch_related is used at this point, it will be done on paginate_queryset.
return self . _get_queryset (
action = self . action ,
ignore_filtering_by_available_teams = ignore_filtering_by_available_teams ,
team_values = self . request . query_params . getlist ( " team " , [ ] ) ,
started_at = self . request . query_params . get ( " started_at " ) ,
label_query = self . request . query_params . getlist ( " label " , [ ] ) ,
)
2022-07-27 12:14:59 +01:00
def get_object ( self ) :
obj = super ( ) . get_object ( )
obj = self . enrich ( [ obj ] ) [ 0 ]
return obj
2022-06-03 08:09:47 -06:00
2024-01-12 15:11:22 +00:00
    def retrieve(self, request, *args, **kwargs):
        """Return alert group details.

        It is worth mentioning that `render_after_resolve_report_json` property will return a list
        of log entries including actions involving the alert group, notifications triggered for a user
        and resolution notes updates.

        A few additional notes about the possible values for each key in the logs:

        - `time`: humanized time delta respect to now when the action took place
        - `action`: human-readable description of the action
        - `realm`: resource involved in the action; one of three possible values:
          `alert_group`, `user_notification`, `resolution_note`
        - `type`: integer value indicating the type of action (see below)
        - `created_at`: timestamp corresponding to when the action happened
        - `author`: details about the user performing the action

        Possible `type` values depending on the realm value:

        For `alert_group`:
        - 0: Acknowledged
        - 1: Unacknowledged
        - 2: Invite
        - 3: Stop invitation
        - 4: Re-invite
        - 5: Escalation triggered
        - 6: Invitation triggered
        - 7: Silenced
        - 8: Attached
        - 9: Unattached
        - 10: Custom button triggered
        - 11: Unacknowledged by timeout
        - 12: Failed attachment
        - 13: Incident resolved
        - 14: Incident unresolved
        - 15: Unsilenced
        - 16: Escalation finished
        - 17: Escalation failed
        - 18: Acknowledge reminder triggered
        - 19: Wiped
        - 20: Deleted
        - 21: Incident registered
        - 22: A route is assigned to the incident
        - 23: Trigger direct paging escalation
        - 24: Unpage a user
        - 25: Restricted

        For `user_notification`:
        - 0: Personal notification triggered
        - 1: Personal notification finished
        - 2: Personal notification success,
        - 3: Personal notification failed

        For `resolution_note`:
        - 0: slack
        - 1: web
        """
        return super().retrieve(request, *args, **kwargs)
2023-09-11 10:25:00 -03:00
2023-11-06 15:30:32 -05:00
def destroy ( self , request , * args , * * kwargs ) :
instance = self . get_object ( )
delete_alert_group . apply_async ( ( instance . pk , request . user . pk ) )
return Response ( status = status . HTTP_204_NO_CONTENT )
2024-01-12 15:11:22 +00:00
@extend_schema (
filters = True , # filter alert groups before counting them
responses = inline_serializer ( name = " AlertGroupStats " , fields = { " count " : serializers . IntegerField ( ) } ) ,
)
@action ( methods = [ " get " ] , detail = False )
def stats ( self , request ) :
2023-11-16 07:15:05 -05:00
"""
Return number of alert groups capped at 100001
"""
2023-06-27 10:58:16 +08:00
MAX_COUNT = 100001
alert_groups = self . filter_queryset ( self . get_queryset ( ) ) [ : MAX_COUNT ]
count = alert_groups . count ( )
count = f " { MAX_COUNT - 1 } + " if count == MAX_COUNT else str ( count )
2024-01-12 15:11:22 +00:00
return Response ( { " count " : count } )
2022-06-03 08:09:47 -06:00
2024-01-12 15:11:22 +00:00
@extend_schema ( responses = AlertGroupSerializer )
2022-06-03 08:09:47 -06:00
@action ( methods = [ " post " ] , detail = True )
def acknowledge ( self , request , pk ) :
2023-11-16 07:15:05 -05:00
"""
Acknowledge an alert group
"""
2022-06-03 08:09:47 -06:00
alert_group = self . get_object ( )
if alert_group . is_maintenance_incident :
raise BadRequest ( detail = " Can ' t acknowledge maintenance alert group " )
if alert_group . root_alert_group is not None :
raise BadRequest ( detail = " Can ' t acknowledge an attached alert group " )
2024-03-27 13:37:01 +01:00
alert_group . acknowledge_by_user_or_backsync ( self . request . user , action_source = ActionSource . WEB )
2022-06-03 08:09:47 -06:00
return Response ( AlertGroupSerializer ( alert_group , context = { " request " : self . request } ) . data )
2024-01-12 15:11:22 +00:00
@extend_schema ( responses = AlertGroupSerializer )
2022-06-03 08:09:47 -06:00
@action ( methods = [ " post " ] , detail = True )
def unacknowledge ( self , request , pk ) :
2023-11-16 07:15:05 -05:00
"""
Unacknowledge an alert group
"""
2022-06-03 08:09:47 -06:00
alert_group = self . get_object ( )
if alert_group . is_maintenance_incident :
raise BadRequest ( detail = " Can ' t unacknowledge maintenance alert group " )
if alert_group . root_alert_group is not None :
raise BadRequest ( detail = " Can ' t unacknowledge an attached alert group " )
if not alert_group . acknowledged :
raise BadRequest ( detail = " The alert group is not acknowledged " )
if alert_group . resolved :
raise BadRequest ( detail = " Can ' t unacknowledge a resolved alert group " )
2024-03-27 13:37:01 +01:00
alert_group . un_acknowledge_by_user_or_backsync ( self . request . user , action_source = ActionSource . WEB )
2022-06-03 08:09:47 -06:00
return Response ( AlertGroupSerializer ( alert_group , context = { " request " : self . request } ) . data )
2024-01-12 15:11:22 +00:00
@extend_schema (
request = inline_serializer (
name = " AlertGroupResolve " , fields = { " resolution_note " : serializers . CharField ( required = False , allow_null = True ) }
) ,
responses = AlertGroupSerializer ,
)
2022-06-03 08:09:47 -06:00
@action ( methods = [ " post " ] , detail = True )
def resolve ( self , request , pk ) :
2023-11-16 07:15:05 -05:00
"""
Resolve an alert group
"""
2022-06-03 08:09:47 -06:00
alert_group = self . get_object ( )
organization = self . request . user . organization
if alert_group . root_alert_group is not None :
raise BadRequest ( detail = " Can ' t resolve an attached alert group " )
if alert_group . is_maintenance_incident :
alert_group . stop_maintenance ( self . request . user )
else :
2023-06-07 20:19:16 +08:00
resolution_note_text = request . data . get ( " resolution_note " )
if resolution_note_text :
rn = ResolutionNote . objects . create (
alert_group = alert_group ,
author = self . request . user ,
2023-10-20 15:22:45 +01:00
source = (
ResolutionNote . Source . MOBILE_APP
if isinstance ( self . request . successful_authenticator , MobileAppAuthTokenAuthentication )
else ResolutionNote . Source . WEB
) ,
2023-06-07 20:19:16 +08:00
message_text = resolution_note_text [ : 3000 ] , # trim text to fit in the db field
)
send_update_resolution_note_signal . apply_async (
kwargs = {
" alert_group_pk " : alert_group . pk ,
" resolution_note_pk " : rn . pk ,
}
2022-06-03 08:09:47 -06:00
)
2023-06-07 20:19:16 +08:00
else :
# Check resolution note required setting only if resolution_note_text was not provided.
if organization . is_resolution_note_required and not alert_group . has_resolution_notes :
return Response (
data = {
" code " : AlertGroupAPIError . RESOLUTION_NOTE_REQUIRED . value ,
" detail " : " Alert group without resolution note cannot be resolved due to organization settings " ,
} ,
status = status . HTTP_400_BAD_REQUEST ,
)
2024-03-27 13:37:01 +01:00
alert_group . resolve_by_user_or_backsync ( self . request . user , action_source = ActionSource . WEB )
2022-06-03 08:09:47 -06:00
return Response ( AlertGroupSerializer ( alert_group , context = { " request " : self . request } ) . data )
2024-01-12 15:11:22 +00:00
@extend_schema ( responses = AlertGroupSerializer )
2022-06-03 08:09:47 -06:00
@action ( methods = [ " post " ] , detail = True )
def unresolve ( self , request , pk ) :
2023-11-16 07:15:05 -05:00
"""
Unresolve an alert group
"""
2022-06-03 08:09:47 -06:00
alert_group = self . get_object ( )
if alert_group . is_maintenance_incident :
raise BadRequest ( detail = " Can ' t unresolve maintenance alert group " )
if alert_group . root_alert_group is not None :
raise BadRequest ( detail = " Can ' t unresolve an attached alert group " )
if not alert_group . resolved :
raise BadRequest ( detail = " The alert group is not resolved " )
2024-03-27 13:37:01 +01:00
alert_group . un_resolve_by_user_or_backsync ( self . request . user , action_source = ActionSource . WEB )
2022-06-03 08:09:47 -06:00
return Response ( AlertGroupSerializer ( alert_group , context = { " request " : self . request } ) . data )
2024-01-12 15:11:22 +00:00
@extend_schema (
request = inline_serializer ( name = " AlertGroupAttach " , fields = { " root_alert_group_pk " : serializers . CharField ( ) } ) ,
responses = AlertGroupSerializer ,
)
2022-06-03 08:09:47 -06:00
@action ( methods = [ " post " ] , detail = True )
def attach ( self , request , pk = None ) :
2023-08-16 14:13:56 +08:00
"""
Attach alert group to another alert group
"""
2022-06-03 08:09:47 -06:00
alert_group = self . get_object ( )
if alert_group . is_maintenance_incident :
raise BadRequest ( detail = " Can ' t attach maintenance alert group " )
if alert_group . dependent_alert_groups . count ( ) > 0 :
raise BadRequest ( detail = " Can ' t attach an alert group because it has another alert groups attached to it " )
if not alert_group . is_root_alert_group :
raise BadRequest ( detail = " Can ' t attach an alert group because it has already been attached " )
try :
root_alert_group = self . get_queryset ( ) . get ( public_primary_key = request . data [ " root_alert_group_pk " ] )
except AlertGroup . DoesNotExist :
return Response ( status = status . HTTP_400_BAD_REQUEST )
if root_alert_group . resolved or root_alert_group . root_alert_group is not None :
return Response ( status = status . HTTP_400_BAD_REQUEST )
if root_alert_group == alert_group :
return Response ( status = status . HTTP_400_BAD_REQUEST )
alert_group . attach_by_user ( self . request . user , root_alert_group , action_source = ActionSource . WEB )
return Response ( AlertGroupSerializer ( alert_group , context = { " request " : self . request } ) . data )
2024-01-12 15:11:22 +00:00
@extend_schema ( responses = AlertGroupSerializer )
2022-06-03 08:09:47 -06:00
@action ( methods = [ " post " ] , detail = True )
def unattach ( self , request , pk = None ) :
2023-11-16 07:15:05 -05:00
"""
Unattach an alert group that is already attached to another alert group
"""
2022-06-03 08:09:47 -06:00
alert_group = self . get_object ( )
if alert_group . is_maintenance_incident :
raise BadRequest ( detail = " Can ' t unattach maintenance alert group " )
if alert_group . is_root_alert_group :
raise BadRequest ( detail = " Can ' t unattach an alert group because it is not attached " )
2022-07-27 12:14:59 +01:00
2022-06-03 08:09:47 -06:00
alert_group . un_attach_by_user ( self . request . user , action_source = ActionSource . WEB )
return Response ( AlertGroupSerializer ( alert_group , context = { " request " : self . request } ) . data )
2024-01-12 15:11:22 +00:00
@extend_schema (
request = inline_serializer ( name = " AlertGroupSilence " , fields = { " delay " : serializers . IntegerField ( ) } ) ,
responses = AlertGroupSerializer ,
)
2022-06-03 08:09:47 -06:00
@action ( methods = [ " post " ] , detail = True )
def silence ( self , request , pk = None ) :
2023-11-16 07:15:05 -05:00
"""
Silence an alert group for a specified delay
"""
2022-06-03 08:09:47 -06:00
alert_group = self . get_object ( )
delay = request . data . get ( " delay " )
if delay is None :
raise BadRequest ( detail = " Please specify a delay for silence " )
if alert_group . root_alert_group is not None :
raise BadRequest ( detail = " Can ' t silence an attached alert group " )
2024-03-27 13:37:01 +01:00
alert_group . silence_by_user_or_backsync ( request . user , silence_delay = delay , action_source = ActionSource . WEB )
2022-06-03 08:09:47 -06:00
return Response ( AlertGroupSerializer ( alert_group , context = { " request " : request } ) . data )
2023-08-16 14:13:56 +08:00
@extend_schema (
responses = inline_serializer (
2024-01-12 15:11:22 +00:00
name = " AlertGroupSilenceOptions " ,
fields = {
" value " : serializers . ChoiceField ( choices = [ value for value , _ in AlertGroup . SILENCE_DELAY_OPTIONS ] ) ,
" display_name " : serializers . ChoiceField (
choices = [ display_name for _ , display_name in AlertGroup . SILENCE_DELAY_OPTIONS ]
) ,
} ,
2024-05-23 12:06:00 +01:00
many = True ,
2023-08-16 14:13:56 +08:00
)
)
2022-06-03 08:09:47 -06:00
@action ( methods = [ " get " ] , detail = False )
def silence_options ( self , request ) :
2024-06-03 14:06:47 +01:00
# TODO: DEPRECATED, REMOVE IN A FUTURE RELEASE
2023-11-16 07:15:05 -05:00
"""
Retrieve a list of valid silence options
"""
2022-06-03 08:09:47 -06:00
data = [
{ " value " : value , " display_name " : display_name } for value , display_name in AlertGroup . SILENCE_DELAY_OPTIONS
]
return Response ( data )
2024-01-12 15:11:22 +00:00
@extend_schema ( responses = AlertGroupSerializer )
2022-06-03 08:09:47 -06:00
@action ( methods = [ " post " ] , detail = True )
def unsilence ( self , request , pk = None ) :
2023-11-16 07:15:05 -05:00
"""
Unsilence a silenced alert group
"""
2022-06-03 08:09:47 -06:00
alert_group = self . get_object ( )
if not alert_group . silenced :
raise BadRequest ( detail = " The alert group is not silenced " )
if alert_group . resolved :
raise BadRequest ( detail = " Can ' t unsilence a resolved alert group " )
if alert_group . acknowledged :
raise BadRequest ( detail = " Can ' t unsilence an acknowledged alert group " )
if alert_group . root_alert_group is not None :
raise BadRequest ( detail = " Can ' t unsilence an attached alert group " )
2024-03-27 13:37:01 +01:00
alert_group . un_silence_by_user_or_backsync ( request . user , action_source = ActionSource . WEB )
2022-06-03 08:09:47 -06:00
return Response ( AlertGroupSerializer ( alert_group , context = { " request " : request } ) . data )
2024-01-12 15:11:22 +00:00
@extend_schema (
request = inline_serializer ( name = " AlertGroupUnpageUser " , fields = { " user_id " : serializers . CharField ( ) } ) ,
responses = AlertGroupSerializer ,
)
2023-01-17 12:19:08 -03:00
@action ( methods = [ " post " ] , detail = True )
def unpage_user ( self , request , pk = None ) :
2023-11-16 07:15:05 -05:00
"""
Remove a user that was directly paged for the alert group
"""
2023-01-17 12:19:08 -03:00
organization = request . auth . organization
from_user = request . user
alert_group = self . get_object ( )
try :
user_id = request . data [ " user_id " ]
except KeyError :
raise BadRequest ( detail = " Please specify user_id " )
try :
user = organization . users . get ( public_primary_key = user_id )
except User . DoesNotExist :
raise BadRequest ( detail = " User not found " )
unpage_user ( alert_group = alert_group , user = user , from_user = from_user )
return Response ( status = status . HTTP_200_OK )
2024-01-12 15:11:22 +00:00
@extend_schema (
responses = inline_serializer (
name = " AlertGroupFilters " ,
fields = {
" name " : serializers . CharField ( ) ,
" type " : serializers . CharField ( ) ,
" href " : serializers . CharField ( required = False ) ,
" global " : serializers . BooleanField ( required = False ) ,
" default " : serializers . JSONField ( required = False ) ,
" description " : serializers . CharField ( required = False ) ,
" options " : inline_serializer (
name = " AlertGroupFiltersOptions " ,
fields = {
" value " : serializers . CharField ( ) ,
" display_name " : serializers . IntegerField ( ) ,
} ,
) ,
} ,
many = True ,
)
)
2022-06-03 08:09:47 -06:00
@action ( methods = [ " get " ] , detail = False )
def filters ( self , request ) :
2023-11-16 07:15:05 -05:00
"""
Retrieve a list of valid filter options that can be used to filter alert groups
"""
Reworked declare incident escalation step (#5130)
Reworked https://github.com/grafana/oncall/pull/5047. Main update is the
switch from FK to a [M2M
relation](https://docs.google.com/document/d/1HeulqxoFShSHtInQrZNJLL5MDlHPNT50rVGaK3zZWvw/edit?disco=AAABVLjV4W8)
(which doesn't really change the original/intended behavior, besides not
needing to alter the alert group table, and it is a bit more flexible;
the extra table shouldn't introduce issues because this is used only for
tracking purposes and the information needed in the log record is
already there).
Avoid a db migration involving alert group table:
```
--
-- Create model RelatedIncident
--
CREATE TABLE `alerts_relatedincident` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, `incident_id` varchar(50) NOT NULL, `created_at` datetime(6) NOT NULL, `is_active` bool NOT NULL, `channel_filter_id` bigint NULL, `organization_id` bigint NOT NULL);
CREATE TABLE `alerts_relatedincident_attached_alert_groups` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, `relatedincident_id` bigint NOT NULL, `alertgroup_id` bigint NOT NULL);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincident_organization_id_incident_id_d7fc9a4f_uniq` UNIQUE (`organization_id`, `incident_id`);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincide_channel_filter_id_9556c836_fk_alerts_ch` FOREIGN KEY (`channel_filter_id`) REFERENCES `alerts_channelfilter` (`id`);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincide_organization_id_74ed6bed_fk_user_mana` FOREIGN KEY (`organization_id`) REFERENCES `user_management_organization` (`id`);
CREATE INDEX `alerts_relatedincident_incident_id_8356a799` ON `alerts_relatedincident` (`incident_id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincident_a_relatedincident_id_alert_3d683baa_uniq` UNIQUE (`relatedincident_id`, `alertgroup_id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincide_relatedincident_id_3e5e7a23_fk_alerts_re` FOREIGN KEY (`relatedincident_id`) REFERENCES `alerts_relatedincident` (`id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincide_alertgroup_id_0125deca_fk_alerts_al` FOREIGN KEY (`alertgroup_id`) REFERENCES `alerts_alertgroup` (`id`);
```
2024-10-07 16:26:10 -03:00
organization = self . request . auth . organization
2022-06-03 08:09:47 -06:00
api_root = " /api/internal/v1/ "
2024-05-22 13:02:56 +01:00
default_day_range = 30
2022-06-03 08:09:47 -06:00
2024-05-22 13:02:56 +01:00
default_datetime_range = f " now- { default_day_range } d_now "
2022-06-03 08:09:47 -06:00
filter_options = [
2023-03-22 00:57:20 +08:00
{
" name " : " team " ,
" type " : " team_select " ,
" href " : api_root + " teams/ " ,
" global " : True ,
} ,
2022-06-03 08:09:47 -06:00
{ " name " : " integration " , " type " : " options " , " href " : api_root + " alert_receive_channels/?filters=true " } ,
2023-03-14 14:38:18 +00:00
{ " name " : " escalation_chain " , " type " : " options " , " href " : api_root + " escalation_chains/?filters=true " } ,
2022-06-03 08:09:47 -06:00
{
" name " : " acknowledged_by " ,
" type " : " options " ,
" href " : api_root + " users/?filters=true&roles=0&roles=1&roles=2 " ,
" default " : { " display_name " : self . request . user . username , " value " : self . request . user . public_primary_key } ,
} ,
{
" name " : " resolved_by " ,
" type " : " options " ,
" href " : api_root + " users/?filters=true&roles=0&roles=1&roles=2 " ,
} ,
{
" name " : " silenced_by " ,
" type " : " options " ,
" href " : api_root + " users/?filters=true&roles=0&roles=1&roles=2 " ,
} ,
{
" name " : " invitees_are " ,
" type " : " options " ,
" href " : api_root + " users/?filters=true&roles=0&roles=1&roles=2 " ,
} ,
2023-01-30 09:08:18 -03:00
{
" name " : " involved_users_are " ,
" type " : " options " ,
" href " : api_root + " users/?filters=true&roles=0&roles=1&roles=2 " ,
" default " : { " display_name " : self . request . user . username , " value " : self . request . user . public_primary_key } ,
2023-05-02 13:50:03 +08:00
" description " : f " This filter works only for last { AlertGroupFilter . FILTER_BY_INVOLVED_USERS_ALERT_GROUPS_CUTOFF } alert groups these users involved in. " ,
2023-01-30 09:08:18 -03:00
} ,
2022-06-03 08:09:47 -06:00
{
" name " : " status " ,
" type " : " options " ,
" options " : [
2023-03-22 00:57:20 +08:00
{ " display_name " : " firing " , " value " : AlertGroup . NEW } ,
2022-06-03 08:09:47 -06:00
{ " display_name " : " acknowledged " , " value " : AlertGroup . ACKNOWLEDGED } ,
{ " display_name " : " resolved " , " value " : AlertGroup . RESOLVED } ,
{ " display_name " : " silenced " , " value " : AlertGroup . SILENCED } ,
] ,
} ,
{
" name " : " started_at " ,
" type " : " daterange " ,
" default " : default_datetime_range ,
} ,
{
" name " : " resolved_at " ,
" type " : " daterange " ,
" default " : default_datetime_range ,
} ,
{
" name " : " with_resolution_note " ,
" type " : " boolean " ,
" default " : " true " ,
} ,
2023-01-30 09:08:18 -03:00
{
" name " : " mine " ,
" type " : " boolean " ,
" default " : " true " ,
2023-05-02 13:50:03 +08:00
" description " : f " This filter works only for last { AlertGroupFilter . FILTER_BY_INVOLVED_USERS_ALERT_GROUPS_CUTOFF } alert groups you ' re involved in. " ,
2023-01-30 09:08:18 -03:00
} ,
2022-06-03 08:09:47 -06:00
]
2024-07-22 11:30:28 +01:00
if settings . FEATURE_ALERT_GROUP_SEARCH_ENABLED :
2024-07-23 17:12:00 +01:00
description = " Search by alert group ID, number or title. "
if settings . FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS :
description + = (
f " The search is limited to alert groups started in the last "
f " { settings . FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS } days of the specified date range. "
)
filter_options = [ { " name " : " search " , " type " : " search " , " description " : description } ] + filter_options
2024-07-22 11:30:28 +01:00
Reworked declare incident escalation step (#5130)
Reworked https://github.com/grafana/oncall/pull/5047. Main update is the
switch from FK to a [M2M
relation](https://docs.google.com/document/d/1HeulqxoFShSHtInQrZNJLL5MDlHPNT50rVGaK3zZWvw/edit?disco=AAABVLjV4W8)
(which doesn't really change the original/intended behavior, besides not
needing to alter the alert group table, and it is a bit more flexible;
the extra table shouldn't introduce issues because this is used only for
tracking purposes and the information needed in the log record is
already there).
Avoid a db migration involving alert group table:
```
--
-- Create model RelatedIncident
--
CREATE TABLE `alerts_relatedincident` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, `incident_id` varchar(50) NOT NULL, `created_at` datetime(6) NOT NULL, `is_active` bool NOT NULL, `channel_filter_id` bigint NULL, `organization_id` bigint NOT NULL);
CREATE TABLE `alerts_relatedincident_attached_alert_groups` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, `relatedincident_id` bigint NOT NULL, `alertgroup_id` bigint NOT NULL);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincident_organization_id_incident_id_d7fc9a4f_uniq` UNIQUE (`organization_id`, `incident_id`);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincide_channel_filter_id_9556c836_fk_alerts_ch` FOREIGN KEY (`channel_filter_id`) REFERENCES `alerts_channelfilter` (`id`);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincide_organization_id_74ed6bed_fk_user_mana` FOREIGN KEY (`organization_id`) REFERENCES `user_management_organization` (`id`);
CREATE INDEX `alerts_relatedincident_incident_id_8356a799` ON `alerts_relatedincident` (`incident_id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincident_a_relatedincident_id_alert_3d683baa_uniq` UNIQUE (`relatedincident_id`, `alertgroup_id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincide_relatedincident_id_3e5e7a23_fk_alerts_re` FOREIGN KEY (`relatedincident_id`) REFERENCES `alerts_relatedincident` (`id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincide_alertgroup_id_0125deca_fk_alerts_al` FOREIGN KEY (`alertgroup_id`) REFERENCES `alerts_alertgroup` (`id`);
```
2024-10-07 16:26:10 -03:00
if is_labels_feature_enabled ( organization ) :
2023-11-06 10:31:12 +00:00
filter_options . append (
{
" name " : " label " ,
" display_name " : " Label " ,
" type " : " alert_group_labels " ,
}
)
Reworked declare incident escalation step (#5130)
Reworked https://github.com/grafana/oncall/pull/5047. Main update is the
switch from FK to a [M2M
relation](https://docs.google.com/document/d/1HeulqxoFShSHtInQrZNJLL5MDlHPNT50rVGaK3zZWvw/edit?disco=AAABVLjV4W8)
(which doesn't really change the original/intended behavior, besides not
needing to alter the alert group table, and it is a bit more flexible;
the extra table shouldn't introduce issues because this is used only for
tracking purposes and the information needed in the log record is
already there).
Avoid a db migration involving alert group table:
```
--
-- Create model RelatedIncident
--
CREATE TABLE `alerts_relatedincident` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, `incident_id` varchar(50) NOT NULL, `created_at` datetime(6) NOT NULL, `is_active` bool NOT NULL, `channel_filter_id` bigint NULL, `organization_id` bigint NOT NULL);
CREATE TABLE `alerts_relatedincident_attached_alert_groups` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, `relatedincident_id` bigint NOT NULL, `alertgroup_id` bigint NOT NULL);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincident_organization_id_incident_id_d7fc9a4f_uniq` UNIQUE (`organization_id`, `incident_id`);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincide_channel_filter_id_9556c836_fk_alerts_ch` FOREIGN KEY (`channel_filter_id`) REFERENCES `alerts_channelfilter` (`id`);
ALTER TABLE `alerts_relatedincident` ADD CONSTRAINT `alerts_relatedincide_organization_id_74ed6bed_fk_user_mana` FOREIGN KEY (`organization_id`) REFERENCES `user_management_organization` (`id`);
CREATE INDEX `alerts_relatedincident_incident_id_8356a799` ON `alerts_relatedincident` (`incident_id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincident_a_relatedincident_id_alert_3d683baa_uniq` UNIQUE (`relatedincident_id`, `alertgroup_id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincide_relatedincident_id_3e5e7a23_fk_alerts_re` FOREIGN KEY (`relatedincident_id`) REFERENCES `alerts_relatedincident` (`id`);
ALTER TABLE `alerts_relatedincident_attached_alert_groups` ADD CONSTRAINT `alerts_relatedincide_alertgroup_id_0125deca_fk_alerts_al` FOREIGN KEY (`alertgroup_id`) REFERENCES `alerts_alertgroup` (`id`);
```
2024-10-07 16:26:10 -03:00
if is_declare_incident_step_enabled ( organization ) :
filter_options . append (
{
" name " : " has_related_incident " ,
" type " : " boolean " ,
" default " : " true " ,
}
)
2022-06-03 08:09:47 -06:00
return Response ( filter_options )
2024-01-12 15:11:22 +00:00
@extend_schema (
request = inline_serializer (
name = " AlertGroupBulkActionRequest " ,
fields = {
" alert_group_pks " : serializers . ListField ( child = serializers . CharField ( ) ) ,
" action " : serializers . ChoiceField ( choices = AlertGroup . BULK_ACTIONS ) ,
" delay " : serializers . IntegerField (
required = False , allow_null = True , help_text = " only applicable for silence "
) ,
} ,
)
)
2022-06-03 08:09:47 -06:00
@action ( methods = [ " post " ] , detail = False )
def bulk_action ( self , request ) :
2023-11-16 07:15:05 -05:00
"""
Perform a bulk action on a list of alert groups
"""
2024-01-12 15:11:22 +00:00
alert_group_pks = self . request . data . get ( " alert_group_pks " , [ ] )
action_name = self . request . data . get ( " action " , None )
2022-06-03 08:09:47 -06:00
delay = self . request . data . get ( " delay " )
kwargs = { }
2024-01-12 15:11:22 +00:00
if action_name not in AlertGroup . BULK_ACTIONS :
2022-06-03 08:09:47 -06:00
return Response ( " Unknown action " , status = status . HTTP_400_BAD_REQUEST )
2024-01-12 15:11:22 +00:00
if action_name == AlertGroup . SILENCE :
2022-06-03 08:09:47 -06:00
if delay is None :
raise BadRequest ( detail = " Please specify a delay for silence " )
kwargs [ " silence_delay " ] = delay
2023-07-18 13:48:34 +02:00
alert_groups = AlertGroup . objects . filter (
2024-01-12 15:11:22 +00:00
channel__organization = self . request . auth . organization , public_primary_key__in = alert_group_pks
2022-07-27 12:14:59 +01:00
)
2022-06-03 08:09:47 -06:00
kwargs [ " user " ] = self . request . user
kwargs [ " alert_groups " ] = alert_groups
2024-01-12 15:11:22 +00:00
method = getattr ( AlertGroup , f " bulk_ { action_name } " )
2022-06-03 08:09:47 -06:00
method ( * * kwargs )
return Response ( status = status . HTTP_200_OK )
2024-01-12 15:11:22 +00:00
@extend_schema (
responses = inline_serializer (
name = " AlertGroupBulkActionOptions " ,
fields = {
" value " : serializers . ChoiceField ( choices = AlertGroup . BULK_ACTIONS ) ,
" display_name " : serializers . ChoiceField ( choices = AlertGroup . BULK_ACTIONS ) ,
} ,
many = True ,
)
)
2022-06-03 08:09:47 -06:00
@action ( methods = [ " get " ] , detail = False )
def bulk_action_options ( self , request ) :
2023-11-16 07:15:05 -05:00
"""
Retrieve a list of valid bulk action options
"""
2022-06-03 08:09:47 -06:00
return Response (
[ { " value " : action_name , " display_name " : action_name } for action_name in AlertGroup . BULK_ACTIONS ]
)
# This method is required for PreviewTemplateMixin
2023-04-18 11:57:40 +08:00
def get_alert_to_template ( self , payload = None ) :
2022-06-03 08:09:47 -06:00
return self . get_object ( ) . alerts . first ( )
2024-01-10 15:52:59 +01:00
2024-10-24 11:24:36 +02:00
@extend_schema ( responses = AlertGroupEscalationSnapshotAPISerializer )
2024-01-10 15:52:59 +01:00
@action ( methods = [ " get " ] , detail = True )
def escalation_snapshot ( self , request , pk = None ) :
alert_group = self . get_object ( )
escalation_snapshot = alert_group . escalation_snapshot
result = AlertGroupEscalationSnapshotAPISerializer ( escalation_snapshot ) . data if escalation_snapshot else { }
return Response ( result )
2025-01-16 09:19:32 -03:00
@extend_schema (
responses = inline_serializer (
name = " AffectedServices " ,
fields = {
" name " : serializers . CharField ( ) ,
" service_url " : serializers . CharField ( ) ,
" alert_groups_url " : serializers . CharField ( ) ,
} ,
many = True ,
)
)
@action ( methods = [ " get " ] , detail = False )
def filter_affected_services ( self , request ) :
""" Given a list of service names, return the ones that have active alerts. """
2025-01-20 16:49:59 +01:00
if not settings . FEATURE_SERVICE_DEPENDENCIES_ENABLED :
raise NotFound
2025-01-16 09:19:32 -03:00
organization = self . request . auth . organization
services = self . request . query_params . getlist ( " service " , [ ] )
url_builder = UIURLBuilder ( organization )
affected_services = [ ]
days_to_check = 7
for service_name in services :
is_affected = (
self . _get_queryset (
started_at = timezone . now ( ) - timezone . timedelta ( days = days_to_check ) ,
label_query = [ f " service_name: { service_name } " ] ,
)
. filter (
resolved = False ,
silenced = False ,
)
. exists ( )
)
if is_affected :
affected_services . append (
{
" name " : service_name ,
" service_url " : url_builder . service_page ( service_name ) ,
" alert_groups_url " : url_builder . alert_groups (
f " ?status=0&status=1&started_at=now- { days_to_check } d_now&label=service_name: { service_name } "
) ,
}
)
return Response ( affected_services )