remove references to `AlertGroup.is_archived` and `AlertGroup.unarchived_objects` (#2524)
# What this PR does

This is a follow-up to #2502, which started to remove the logic for "archiving" alert groups. This PR:

- removes all references to `AlertGroup.is_archived` and marks the column as deprecated. We will remove it in the next release
- removes the `AlertGroup.unarchived_objects` `Manager`
- renames the `AlertGroup.all_objects` `Manager` to `AlertGroup.objects`

## Checklist

- [x] Unit, integration, and e2e (if applicable) tests updated
- [ ] Documentation added (or `pr:no public docs` PR label added if not required)
- [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required)
Parent: 94fd91d6be · Commit: 9cc74e5b67

53 changed files with 134 additions and 135 deletions
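The bulk of those 53 files is a mechanical `all_objects` → `objects` rename. At the model level the change collapses to the following (a minimal sketch; the real `AlertGroup` model and its queryset carry many more fields and helpers):

```python
from django.db import models


class AlertGroupQuerySet(models.QuerySet):
    # the real queryset defines grouping helpers such as get_or_create_grouping()
    pass


class AlertGroup(models.Model):
    # Before: two managers --
    #   all_objects = AlertGroupQuerySet.as_manager()
    #   unarchived_objects = UnarchivedAlertGroupQuerySet.as_manager()  # filter() injected is_archived=False
    # After: a single default manager under Django's conventional name:
    objects = AlertGroupQuerySet.as_manager()
```

With the manager living at the conventional `objects` name, call sites read like any other Django model, which is exactly what the per-file hunks below do.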
CHANGELOG.md
@@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ### Changed

+- Deprecate `AlertGroup.is_archived` column. Column will be removed in a subsequent release. By @joeyorlando ([#2524](https://github.com/grafana/oncall/pull/2524)).
 - Update Slack "invite" feature to use direct paging by @vadimkerr ([#2562](https://github.com/grafana/oncall/pull/2562))
 - Change "Current responders" to "Additional Responders" in web UI by @vadimkerr ([#2567](https://github.com/grafana/oncall/pull/2567))

@@ -27,7 +27,7 @@ class AlertGroupAdmin(CustomModelAdmin):
     list_filter = ("started_at",)

     def get_queryset(self, request):
-        return AlertGroup.all_objects
+        return AlertGroup.objects


 @admin.register(AlertGroupLogRecord)

@@ -256,7 +256,7 @@ class EscalationSnapshotMixin:
         )
         task_id = celery_uuid()

-        AlertGroup.all_objects.filter(pk=self.pk,).update(
+        AlertGroup.objects.filter(pk=self.pk,).update(
             active_escalation_id=task_id,
             is_escalation_finished=False,
             raw_escalation_snapshot=raw_escalation_snapshot,
engine/apps/alerts/migrations/0023_auto_20230718_0952.py (new file, 31 lines)
@@ -0,0 +1,31 @@
+# Generated by Django 3.2.20 on 2023-07-18 09:52
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('alerts', '0022_alter_alertgroup_manual_severity'),
+    ]
+
+    operations = [
+        migrations.AlterModelManagers(
+            name='alertgroup',
+            managers=[
+            ],
+        ),
+        migrations.RemoveIndex(
+            model_name='alertgroup',
+            name='alerts_aler_channel_ee84a7_idx',
+        ),
+        migrations.AlterField(
+            model_name='alertgroup',
+            name='is_archived',
+            field=models.BooleanField(default=False, null=True),
+        ),
+        migrations.AddIndex(
+            model_name='alertgroup',
+            index=models.Index(fields=['channel_id', 'resolved', 'acknowledged', 'silenced', 'root_alert_group_id'], name='alerts_aler_channel_81aeec_idx'),
+        ),
+    ]
@@ -91,7 +91,7 @@ class Alert(models.Model):
         if channel_filter is None:
             channel_filter = ChannelFilter.select_filter(alert_receive_channel, raw_request_data, force_route_id)

-        group, group_created = AlertGroup.all_objects.get_or_create_grouping(
+        group, group_created = AlertGroup.objects.get_or_create_grouping(
             channel=alert_receive_channel,
             channel_filter=channel_filter,
             group_data=group_data,

@@ -134,7 +134,7 @@ class Alert(models.Model):

         if maintenance_uuid is not None:
             try:
-                maintenance_incident = AlertGroup.all_objects.get(maintenance_uuid=maintenance_uuid)
+                maintenance_incident = AlertGroup.objects.get(maintenance_uuid=maintenance_uuid)
                 group.root_alert_group = maintenance_incident
                 group.save(update_fields=["root_alert_group"])
                 log_record_for_root_incident = maintenance_incident.log_records.create(

@@ -47,7 +47,7 @@ def generate_public_primary_key_for_alert_group():
     new_public_primary_key = generate_public_primary_key(prefix)

     failure_counter = 0
-    while AlertGroup.all_objects.filter(public_primary_key=new_public_primary_key).exists():
+    while AlertGroup.objects.filter(public_primary_key=new_public_primary_key).exists():
         new_public_primary_key = increase_public_primary_key_length(
             failure_counter=failure_counter, prefix=prefix, model_name="AlertGroup"
         )
@@ -111,11 +111,6 @@ class AlertGroupQuerySet(models.QuerySet):
             raise


-class UnarchivedAlertGroupQuerySet(models.QuerySet):
-    def filter(self, *args, **kwargs):
-        return super().filter(*args, **kwargs, is_archived=False)
-
-
 class AlertGroupSlackRenderingMixin:
     """
     Ideally this mixin should not exist. Instead of this instance of AlertGroupSlackRenderer should be created and used
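A note on the queryset deleted above: overriding `filter()` constrains only the queries that actually go through `filter()` (including `get()`, which delegates to it), while `all()`, `exclude()`, and reverse relations never hit the override, so archived rows could still leak out. A minimal sketch of that asymmetry, using a hypothetical `Thing` model for illustration:

```python
from django.db import models


class UnarchivedQuerySet(models.QuerySet):
    def filter(self, *args, **kwargs):
        # the extra condition is injected only on explicit filter() calls
        return super().filter(*args, **kwargs, is_archived=False)


class Thing(models.Model):  # hypothetical model, for illustration only
    is_archived = models.BooleanField(default=False)

    unarchived = UnarchivedQuerySet.as_manager()


# Thing.unarchived.filter(pk=1)    -> archived rows hidden
# Thing.unarchived.all()           -> archived rows still returned
# Thing.unarchived.exclude(pk=1)   -> archived rows still returned
```

Dropping the subclass removes that inconsistency along with the feature itself.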
@@ -140,8 +135,7 @@ class AlertGroupSlackRenderingMixin:
 class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.Model):
     log_records: "RelatedManager['AlertGroupLogRecord']"

-    all_objects = AlertGroupQuerySet.as_manager()
-    unarchived_objects = UnarchivedAlertGroupQuerySet.as_manager()
+    objects = AlertGroupQuerySet.as_manager()

     (
         NEW,
@@ -330,7 +324,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
     # NOTE: we should probably migrate this field to models.UUIDField as it's ONLY ever being
     # set to the result of uuid.uuid1
     last_unique_unacknowledge_process_id: UUID | None = models.CharField(max_length=100, null=True, default=None)
-    is_archived = models.BooleanField(default=False)
+    is_archived = deprecate_field(models.BooleanField(default=False))

     wiped_at = models.DateTimeField(null=True, default=None)
     wiped_by = models.ForeignKey(
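The `deprecate_field(...)` wrapper above appears to come from the `django-deprecate-fields` package (the import sits outside this hunk, so treat that as an assumption). The pattern it enables is a two-release column removal: the generated migration relaxes the column to `null=True` (visible in the new migration file earlier in this diff) so the application can stop writing it, while reads of the attribute log a deprecation warning instead of hitting the column. A hedged sketch:

```python
# Sketch of the staged removal, assuming the django-deprecate-fields package.
from deprecate_fields import deprecate_field
from django.db import models


class MyModel(models.Model):  # hypothetical model, for illustration only
    # Release N: wrap the field. makemigrations emits an AlterField making the
    # DB column nullable, and reading instance.flag now logs a deprecation
    # warning instead of returning the stored value.
    flag = deprecate_field(models.BooleanField(default=False))

    # Release N+1: delete the attribute and generate the RemoveField migration
    # that actually drops the column.
```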
@@ -414,9 +408,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
             "is_open_for_grouping",
         ]
         indexes = [
-            models.Index(
-                fields=["channel_id", "resolved", "acknowledged", "silenced", "root_alert_group_id", "is_archived"]
-            ),
+            models.Index(fields=["channel_id", "resolved", "acknowledged", "silenced", "root_alert_group_id"]),
         ]

     def __str__(self):

@@ -1185,7 +1177,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
             "is_escalation_finished",
             "response_time",
         ]
-        AlertGroup.all_objects.bulk_update(alert_groups_to_acknowledge_list, fields=fields_to_update, batch_size=100)
+        AlertGroup.objects.bulk_update(alert_groups_to_acknowledge_list, fields=fields_to_update, batch_size=100)

         for alert_group in alert_groups_to_unresolve_before_acknowledge_list:
             alert_group.log_records.create(
@@ -1226,9 +1218,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
         # Find all dependent alert_groups to update them in one query
         # convert qs to list to prevent changes by update
         root_alert_group_pks = list(root_alert_groups_to_acknowledge.values_list("pk", flat=True))
-        dependent_alert_groups_to_acknowledge = AlertGroup.unarchived_objects.filter(
-            root_alert_group__pk__in=root_alert_group_pks
-        )
+        dependent_alert_groups_to_acknowledge = AlertGroup.objects.filter(root_alert_group__pk__in=root_alert_group_pks)
         with transaction.atomic():
             AlertGroup._bulk_acknowledge(user, root_alert_groups_to_acknowledge)
             AlertGroup._bulk_acknowledge(user, dependent_alert_groups_to_acknowledge)

@@ -1273,7 +1263,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
             "is_escalation_finished",
             "response_time",
         ]
-        AlertGroup.all_objects.bulk_update(alert_groups_to_resolve_list, fields=fields_to_update, batch_size=100)
+        AlertGroup.objects.bulk_update(alert_groups_to_resolve_list, fields=fields_to_update, batch_size=100)

         for alert_group in alert_groups_to_unsilence_before_resolve_list:
             alert_group.log_records.create(

@@ -1315,7 +1305,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
         )
         # convert qs to list to prevent changes by update
         root_alert_group_pks = list(root_alert_groups_to_resolve.values_list("pk", flat=True))
-        dependent_alert_groups_to_resolve = AlertGroup.all_objects.filter(root_alert_group__pk__in=root_alert_group_pks)
+        dependent_alert_groups_to_resolve = AlertGroup.objects.filter(root_alert_group__pk__in=root_alert_group_pks)
         with transaction.atomic():
             AlertGroup._bulk_resolve(user, root_alert_groups_to_resolve)
             AlertGroup._bulk_resolve(user, dependent_alert_groups_to_resolve)
@@ -1455,7 +1445,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
         )
         # convert qs to list to prevent changes by update
         root_alert_group_pks = list(root_alert_groups_unack.values_list("pk", flat=True))
-        dependent_alert_groups_unack = AlertGroup.all_objects.filter(root_alert_group__pk__in=root_alert_group_pks)
+        dependent_alert_groups_unack = AlertGroup.objects.filter(root_alert_group__pk__in=root_alert_group_pks)
         with transaction.atomic():
             AlertGroup._bulk_restart_unack(user, root_alert_groups_unack)
             AlertGroup._bulk_restart_unack(user, dependent_alert_groups_unack)

@@ -1463,7 +1453,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
         root_alert_groups_unresolve = alert_groups.filter(resolved=True, root_alert_group__isnull=True)
         # convert qs to list to prevent changes by update
         root_alert_group_pks = list(root_alert_groups_unresolve.values_list("pk", flat=True))
-        dependent_alert_groups_unresolve = AlertGroup.all_objects.filter(root_alert_group__pk__in=root_alert_group_pks)
+        dependent_alert_groups_unresolve = AlertGroup.objects.filter(root_alert_group__pk__in=root_alert_group_pks)
         with transaction.atomic():
             AlertGroup._bulk_restart_unresolve(user, root_alert_groups_unresolve)
             AlertGroup._bulk_restart_unresolve(user, dependent_alert_groups_unresolve)

@@ -1538,7 +1528,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
             "is_escalation_finished",
             "response_time",
         ]
-        AlertGroup.all_objects.bulk_update(alert_groups_to_silence_list, fields=fields_to_update, batch_size=100)
+        AlertGroup.objects.bulk_update(alert_groups_to_silence_list, fields=fields_to_update, batch_size=100)

         # create log records
         for alert_group in alert_groups_to_unresolve_before_silence_list:
@@ -1725,12 +1715,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
             ]
         )

-    def archive(self):
-        if self.root_alert_group:
-            self.root_alert_group = None
-        self.is_archived = True
-        self.save(update_fields=["is_archived", "root_alert_group"])
-
     @property
     def long_verbose_name(self):
         title = str_or_backup(self.slack_templated_first_alert.title, DEFAULT_BACKUP_TITLE)

@@ -1747,8 +1731,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
     def get_resolve_text(self, mention_user=False):
         if self.resolved_by == AlertGroup.SOURCE:
             return "Resolved by alert source"
-        elif self.resolved_by == AlertGroup.ARCHIVED:
-            return "Resolved because alert has been archived"
         elif self.resolved_by == AlertGroup.LAST_STEP:
             return "Resolved automatically"
         elif self.resolved_by == AlertGroup.WIPED:
@@ -118,7 +118,7 @@ class MaintainableObject(models.Model):
         self.maintenance_started_at = _self.maintenance_started_at
         self.maintenance_author = _self.maintenance_author
         if mode == AlertReceiveChannel.MAINTENANCE:
-            group = AlertGroup.all_objects.create(
+            group = AlertGroup.objects.create(
                 distinction=uuid4(),
                 web_title_cache=f"Maintenance of {verbal} for {maintenance_duration}",
                 maintenance_uuid=maintenance_uuid,

@@ -21,9 +21,7 @@ def acknowledge_reminder_task(alert_group_pk, unacknowledge_process_id):
     task_logger.info(f"Starting a reminder task for acknowledgement timeout with process id {unacknowledge_process_id}")
     with transaction.atomic():
         try:
-            alert_group = AlertGroup.unarchived_objects.filter(pk=alert_group_pk).select_for_update()[
-                0
-            ]  # Lock alert_group:
+            alert_group = AlertGroup.objects.filter(pk=alert_group_pk).select_for_update()[0]  # Lock alert_group:
         except IndexError:
             return f"acknowledge_reminder_task: Alert group with pk {alert_group_pk} doesn't exist"
@@ -89,17 +87,12 @@ def unacknowledge_timeout_task(alert_group_pk, unacknowledge_process_id):
     )
     with transaction.atomic():
         try:
-            alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).select_for_update()[0]  # Lock alert_group:
+            alert_group = AlertGroup.objects.filter(pk=alert_group_pk).select_for_update()[0]  # Lock alert_group:
         except IndexError:
             return f"unacknowledge_timeout_task: Alert group with pk {alert_group_pk} doesn't exist"

     if unacknowledge_process_id == alert_group.last_unique_unacknowledge_process_id:
-        if (
-            not alert_group.resolved
-            and not alert_group.is_archived
-            and alert_group.acknowledged
-            and alert_group.is_root_alert_group
-        ):
+        if not alert_group.resolved and alert_group.acknowledged and alert_group.is_root_alert_group:
             if not alert_group.acknowledged_by_confirmed:
                 log_record = AlertGroupLogRecord(
                     type=AlertGroupLogRecord.TYPE_AUTO_UN_ACK,
@@ -27,7 +27,7 @@ def update_web_title_cache_for_alert_receive_channel(alert_receive_channel_pk):

     countdown = 0
     cursor = 0
-    queryset = AlertGroup.all_objects.filter(channel_id=alert_receive_channel_pk)
+    queryset = AlertGroup.objects.filter(channel_id=alert_receive_channel_pk)
     ids = batch_ids(queryset, cursor)

     while ids:

@@ -57,7 +57,7 @@ def update_web_title_cache(alert_receive_channel_pk, alert_group_pks):
         task_logger.warning(f"AlertReceiveChannel {alert_receive_channel_pk} doesn't exist")
         return

-    alert_groups = AlertGroup.all_objects.filter(pk__in=alert_group_pks).only("pk")
+    alert_groups = AlertGroup.objects.filter(pk__in=alert_group_pks).only("pk")

     # get first alerts in 2 SQL queries
     alerts_info = (

@@ -84,4 +84,4 @@ def update_web_title_cache(alert_receive_channel_pk, alert_group_pks):
         alert_group.web_title_cache = web_title_cache

-    AlertGroup.all_objects.bulk_update(alert_groups, ["web_title_cache"])
+    AlertGroup.objects.bulk_update(alert_groups, ["web_title_cache"])
@@ -9,7 +9,7 @@ from common.custom_celery_tasks import shared_dedicated_queue_retry_task
 def call_ack_url(ack_url, alert_group_pk, channel, http_method="GET"):
     AlertGroup = apps.get_model("alerts", "AlertGroup")
     SlackMessage = apps.get_model("slack", "SlackMessage")
-    alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk)[0]
+    alert_group = AlertGroup.objects.filter(pk=alert_group_pk)[0]
     is_successful, result_message = request_outgoing_webhook(ack_url, http_method)

     if is_successful:

@@ -114,7 +114,7 @@ def check_escalation_finished_task() -> None:
     now = timezone.now()
     two_days_ago = now - datetime.timedelta(days=2)

-    alert_groups = AlertGroup.all_objects.using(get_random_readonly_database_key_if_present_otherwise_default()).filter(
+    alert_groups = AlertGroup.objects.using(get_random_readonly_database_key_if_present_otherwise_default()).filter(
         ~Q(silenced=True, silenced_until__isnull=True),  # filter silenced forever alert_groups
         # here we should query maintenance_uuid rather than joining on channel__integration
         # and checking for something like ~Q(channel__integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE)
@@ -34,7 +34,7 @@ def custom_button_result(custom_button_pk, alert_group_pk, user_pk=None, escalat
         task_logger.info(f"Custom_button {custom_button_pk} for alert_group {alert_group_pk} does not exist")
         return

-    alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk)[0]
+    alert_group = AlertGroup.objects.filter(pk=alert_group_pk)[0]
     escalation_policy = EscalationPolicy.objects.filter(pk=escalation_policy_pk).first()
     task_logger.debug(
         f"Start getting data for request in custom_button_result task for alert_group {alert_group_pk}, "

@@ -13,7 +13,7 @@ logger = get_task_logger(__name__)
 def delete_alert_group(alert_group_pk, user_pk):
     AlertGroup = apps.get_model("alerts", "AlertGroup")
     User = apps.get_model("user_management", "User")
-    alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).first()
+    alert_group = AlertGroup.objects.filter(pk=alert_group_pk).first()
     if not alert_group:
         logger.debug("Alert group not found, skipping delete_alert_group")
         return
@@ -24,7 +24,7 @@ def distribute_alert(alert_id):
     send_alert_create_signal.apply_async((alert_id,))
     # If it's the first alert, let's launch the escalation!
     if alert.is_the_first_alert_in_group:
-        alert_group = AlertGroup.all_objects.filter(pk=alert.group_id).get()
+        alert_group = AlertGroup.objects.filter(pk=alert.group_id).get()
         alert_group.start_escalation_if_needed(countdown=TASK_DELAY_SECONDS)
         alert_group_escalation_snapshot_built.send(sender=distribute_alert, alert_group=alert_group)

@@ -24,7 +24,7 @@ def escalate_alert_group(alert_group_pk):

     with transaction.atomic():
         try:
-            alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).select_for_update()[0]  # Lock alert_group:
+            alert_group = AlertGroup.objects.filter(pk=alert_group_pk).select_for_update()[0]  # Lock alert_group:
         except IndexError:
             return f"Alert group with pk {alert_group_pk} doesn't exist"
@@ -49,12 +49,6 @@ def escalate_alert_group(alert_group_pk):
         # TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
         return "Alert is dependent on another. No need to activate escalation."

-    if alert_group.is_archived:
-        # TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
-        return "Escalation stopped. Reason: incident is archived. Escalation id: {}".format(
-            alert_group.active_escalation_id
-        )
-
     if alert_group.wiped_at is not None:
         # TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
         return "Alert is wiped. No need to activate escalation."

@@ -22,7 +22,7 @@ def invite_user_to_join_incident(invitation_pk):
     except IndexError:
         return f"invite_user_to_join_incident: Invitation with pk {invitation_pk} doesn't exist"

-    if not invitation.is_active or invitation.alert_group.is_archived:
+    if not invitation.is_active:
         return None
     if invitation.attempts_left <= 0 or invitation.alert_group.resolved:
         invitation.is_active = False
@@ -47,7 +47,7 @@ def disable_maintenance(*args, **kwargs):
     write_maintenance_insight_log(object_under_maintenance, user, MaintenanceEvent.FINISHED)
     if object_under_maintenance.maintenance_mode == object_under_maintenance.MAINTENANCE:
         mode_verbal = "Maintenance"
-        maintenance_incident = AlertGroup.all_objects.get(
+        maintenance_incident = AlertGroup.objects.get(
             maintenance_uuid=object_under_maintenance.maintenance_uuid
         )
         transaction.on_commit(maintenance_incident.resolve_by_disable_maintenance)

@@ -16,7 +16,7 @@ def notify_all_task(alert_group_pk, escalation_policy_snapshot_order=None):
     EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
     AlertGroup = apps.get_model("alerts", "AlertGroup")

-    alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+    alert_group = AlertGroup.objects.get(pk=alert_group_pk)

     # check alert group state before notifying all users in the channel
     if alert_group.resolved or alert_group.acknowledged or alert_group.silenced:
@@ -19,7 +19,7 @@ def notify_group_task(alert_group_pk, escalation_policy_snapshot_order=None):
     AlertGroup = apps.get_model("alerts", "AlertGroup")
     EscalationDeliveryStep = scenario_step.ScenarioStep.get_step("escalation_delivery", "EscalationDeliveryStep")

-    alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+    alert_group = AlertGroup.objects.get(pk=alert_group_pk)
     # check alert group state before notifying all users in the group
     if alert_group.resolved or alert_group.acknowledged or alert_group.silenced:
         task_logger.info(f"alert_group {alert_group.pk} was resolved, acked or silenced. No need to notify group")

@@ -36,7 +36,7 @@ def notify_user_task(
     UserHasNotification = apps.get_model("alerts", "UserHasNotification")

     try:
-        alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+        alert_group = AlertGroup.objects.get(pk=alert_group_pk)
     except AlertGroup.DoesNotExist:
         return f"notify_user_task: alert_group {alert_group_pk} doesn't exist"
@@ -127,11 +127,10 @@ def notify_user_task(
         if (
             (alert_group.acknowledged and not notify_even_acknowledged)
             or alert_group.resolved
-            or alert_group.is_archived
             or alert_group.wiped_at
             or alert_group.root_alert_group
         ):
-            return "Acknowledged, resolved, archived, attached or wiped."
+            return "Acknowledged, resolved, attached or wiped."

         if alert_group.silenced and not notify_anyway:
             task_logger.info(
@@ -15,7 +15,7 @@ def resolve_alert_group_by_source_if_needed(alert_group_pk):
     AlertGroupForAlertManager = apps.get_model("alerts", "AlertGroupForAlertManager")
     AlertForAlertManager = apps.get_model("alerts", "AlertForAlertManager")

-    alert_group = AlertGroupForAlertManager.all_objects.get(pk=alert_group_pk)
+    alert_group = AlertGroupForAlertManager.objects.get(pk=alert_group_pk)

     if not resolve_alert_group_by_source_if_needed.request.id == alert_group.active_resolve_calculation_id:
         return "Resolve calculation celery ID mismatch. Duplication or non-active. Active: {}".format(

@@ -9,5 +9,5 @@ from common.custom_celery_tasks import shared_dedicated_queue_retry_task
 )
 def resolve_by_last_step_task(alert_group_pk):
     AlertGroup = apps.get_model("alerts", "AlertGroup")
-    alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+    alert_group = AlertGroup.objects.get(pk=alert_group_pk)
     alert_group.resolve_by_last_step()
@@ -14,7 +14,7 @@ def send_update_log_report_signal(log_record_pk=None, alert_group_pk=None):
     AlertGroup = apps.get_model("alerts", "AlertGroup")
     AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")

-    alert_group = AlertGroup.all_objects.get(id=alert_group_pk)
+    alert_group = AlertGroup.objects.get(id=alert_group_pk)
     if alert_group.is_maintenance_incident:
         task_logger.debug(
             f'send_update_log_report_signal: alert_group={alert_group_pk} msg="skip alert_group_update_log_report_signal, alert group is maintenance incident "'

@@ -1,9 +1,13 @@
+import logging
+
 from django.apps import apps
 from django.conf import settings

 from apps.alerts.signals import alert_group_update_resolution_note_signal
 from common.custom_celery_tasks import shared_dedicated_queue_retry_task

+logger = logging.getLogger(__name__)
+

 @shared_dedicated_queue_retry_task(
     autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
@@ -13,9 +17,9 @@ def send_update_resolution_note_signal(alert_group_pk, resolution_note_pk):
     AlertGroup = apps.get_model("alerts", "AlertGroup")
     ResolutionNote = apps.get_model("alerts", "ResolutionNote")

-    alert_group = AlertGroup.unarchived_objects.filter(pk=alert_group_pk).first()
+    alert_group = AlertGroup.objects.filter(pk=alert_group_pk).first()
     if alert_group is None:
-        print("Sent signal to update resolution note, but alert group is archived or does not exist")
+        logger.info("Sent signal to update resolution note, but alert group does not exist")
         return

     resolution_note = ResolutionNote.objects_with_deleted.get(pk=resolution_note_pk)
@@ -18,9 +18,7 @@ def unsilence_task(alert_group_pk):
     task_logger.info(f"Start unsilence_task for alert_group {alert_group_pk}")
     with transaction.atomic():
         try:
-            alert_group = AlertGroup.unarchived_objects.filter(pk=alert_group_pk).select_for_update()[
-                0
-            ]  # Lock alert_group:
+            alert_group = AlertGroup.objects.filter(pk=alert_group_pk).select_for_update()[0]  # Lock alert_group:
         except IndexError:
             task_logger.info(f"unsilence_task. alert_group {alert_group_pk} doesn't exist")
             return

@@ -10,6 +10,6 @@ from common.custom_celery_tasks import shared_dedicated_queue_retry_task
 def wipe(alert_group_pk, user_pk):
     AlertGroup = apps.get_model("alerts", "AlertGroup")
     User = apps.get_model("user_management", "User")
-    alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).first()
+    alert_group = AlertGroup.objects.filter(pk=alert_group_pk).first()
     user = User.objects.filter(pk=user_pk).first()
     alert_group.wipe_by_user(user)
@@ -120,7 +120,6 @@ def test_get_acknowledge_text(
     "source,expected_text",
     [
         (AlertGroup.SOURCE, "Resolved by alert source"),
-        (AlertGroup.ARCHIVED, "Resolved because alert has been archived"),
         (AlertGroup.LAST_STEP, "Resolved automatically"),
         (AlertGroup.WIPED, "Resolved by wipe"),
         (AlertGroup.DISABLE_MAINTENANCE, "Resolved by stop maintenance"),

@@ -102,7 +102,7 @@ def test_alert_attached_to_maintenance_incident_integration(
     duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds

     alert_receive_channel.start_maintenance(mode, duration, user)
-    maintenance_incident = AlertGroup.all_objects.get(maintenance_uuid=alert_receive_channel.maintenance_uuid)
+    maintenance_incident = AlertGroup.objects.get(maintenance_uuid=alert_receive_channel.maintenance_uuid)

     alert = make_alert_with_custom_create_method(
         title="test_title",

@@ -132,7 +132,7 @@ def test_stop_maintenance(
     duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds

     alert_receive_channel.start_maintenance(mode, duration, user)
-    maintenance_incident = AlertGroup.all_objects.get(maintenance_uuid=alert_receive_channel.maintenance_uuid)
+    maintenance_incident = AlertGroup.objects.get(maintenance_uuid=alert_receive_channel.maintenance_uuid)
     alert = make_alert_with_custom_create_method(
         title="test_title",
         message="test_message",
@@ -139,7 +139,7 @@ def test_direct_paging_user(make_organization, make_user_for_organization):
     )

     # alert group created
-    alert_groups = AlertGroup.all_objects.all()
+    alert_groups = AlertGroup.objects.all()
     assert alert_groups.count() == 1
     ag = alert_groups.get()
     alert = ag.alerts.get()

@@ -172,7 +172,7 @@ def test_direct_paging_schedule(
     direct_paging(organization, None, from_user, schedules=[(schedule, False), (other_schedule, True)])

     # alert group created
-    alert_groups = AlertGroup.all_objects.all()
+    alert_groups = AlertGroup.objects.all()
     assert alert_groups.count() == 1
     ag = alert_groups.get()
     alert = ag.alerts.get()

@@ -203,7 +203,7 @@ def test_direct_paging_reusing_alert_group(
     direct_paging(organization, None, from_user, users=[(user, False)], alert_group=alert_group)

     # no new alert group is created
-    alert_groups = AlertGroup.all_objects.all()
+    alert_groups = AlertGroup.objects.all()
     assert alert_groups.count() == 1
     assert_log_record(alert_group, f"{from_user.username} paged user {user.username}")
     # notifications sent

@@ -236,7 +236,7 @@ def test_direct_paging_custom_chain(
     direct_paging(organization, None, from_user, escalation_chain=custom_chain)

     # alert group created
-    alert_groups = AlertGroup.all_objects.all()
+    alert_groups = AlertGroup.objects.all()
     assert alert_groups.count() == 1
     ag = alert_groups.get()
     channel_filter = ag.channel_filter_with_respect_to_escalation_snapshot

@@ -256,7 +256,7 @@ def test_direct_paging_returns_alert_group(make_organization, make_user_for_orga
     alert_group = direct_paging(organization, None, from_user, title="Help!", message="Fire", users=[(user, False)])

     # check alert group returned by direct paging is the same as the one created
-    assert alert_group == AlertGroup.all_objects.get()
+    assert alert_group == AlertGroup.objects.get()


 @pytest.mark.django_db

@@ -305,7 +305,7 @@ def test_direct_paging_always_create_group(make_organization, make_user_for_orga
     direct_paging(organization, None, from_user, title="Help!", users=[(user, False)])

     # alert group created
-    alert_groups = AlertGroup.all_objects.all()
+    alert_groups = AlertGroup.objects.all()
     assert alert_groups.count() == 2
     # notifications sent
     assert notify_task.apply_async.called_with((user.pk, alert_groups[0].pk), {"important": False})
@@ -70,7 +70,7 @@ class DirectPagingSerializer(serializers.Serializer):

         if alert_group_id:
             try:
-                attrs["alert_group"] = AlertGroup.unarchived_objects.get(
+                attrs["alert_group"] = AlertGroup.objects.get(
                     public_primary_key=alert_group_id, channel__organization=organization
                 )
             except ObjectDoesNotExist:

@@ -11,7 +11,7 @@ class ResolutionNoteSerializer(EagerLoadingMixin, serializers.ModelSerializer):
     id = serializers.CharField(read_only=True, source="public_primary_key")
     alert_group = OrganizationFilteredPrimaryKeyRelatedField(
         filter_field="channel__organization",
-        queryset=AlertGroup.unarchived_objects,
+        queryset=AlertGroup.objects,
     )
     text = serializers.CharField(allow_null=False, source="message_text")
     author = FastUserSerializer(read_only=True)
@@ -242,7 +242,7 @@ class AlertGroupTeamFilteringMixin(TeamFilteringMixin):
                 organization_id=self.request.auth.organization.id,
             ).values_list("id", flat=True)
         )
-        queryset = AlertGroup.unarchived_objects.filter(
+        queryset = AlertGroup.objects.filter(
             channel__in=alert_receive_channels_ids,
         ).only("public_primary_key")

@@ -331,7 +331,7 @@ class AlertGroupView(

         alert_receive_channels_ids = list(alert_receive_channels_qs.values_list("id", flat=True))

-        queryset = AlertGroup.unarchived_objects.filter(
+        queryset = AlertGroup.objects.filter(
             channel__in=alert_receive_channels_ids,
         )

@@ -362,7 +362,7 @@ class AlertGroupView(

         # enrich alert groups with select_related and prefetch_related
         alert_group_pks = [alert_group.pk for alert_group in alert_groups]
-        queryset = AlertGroup.all_objects.filter(pk__in=alert_group_pks).order_by("-pk")
+        queryset = AlertGroup.objects.filter(pk__in=alert_group_pks).order_by("-pk")

         queryset = self.get_serializer_class().setup_eager_loading(queryset)
         alert_groups = list(queryset)

@@ -688,7 +688,7 @@ class AlertGroupView(
             raise BadRequest(detail="Please specify a delay for silence")
         kwargs["silence_delay"] = delay

-        alert_groups = AlertGroup.unarchived_objects.filter(
+        alert_groups = AlertGroup.objects.filter(
             channel__organization=self.request.auth.organization, public_primary_key__in=alert_group_public_pks
         )
@@ -35,7 +35,7 @@ class RouteRegexDebuggerView(APIView):
         MAX_INCIDENTS_TO_SHOW = 5
         INCIDENTS_TO_LOOKUP = 100
         for ag in (
-            AlertGroup.unarchived_objects.prefetch_related(Prefetch("alerts", queryset=Alert.objects.order_by("pk")))
+            AlertGroup.objects.prefetch_related(Prefetch("alerts", queryset=Alert.objects.order_by("pk")))
             .filter(channel__organization=organization, channel__team=team)
             .order_by("-started_at")[:INCIDENTS_TO_LOOKUP]
         ):

@@ -14,6 +14,6 @@ class RetrieveViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet):

 class AlertGroupsView(RetrieveViewSet):
     authentication_classes = (GrafanaIncidentStaticKeyAuth,)
-    queryset = AlertGroup.unarchived_objects.all()
+    queryset = AlertGroup.objects.all()
     serializer_class = AlertGroupSerializer
     lookup_field = "public_primary_key"
@@ -38,7 +38,7 @@ def notify_user_async(user_pk, alert_group_pk, notification_policy_pk):
         return

     try:
-        alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+        alert_group = AlertGroup.objects.get(pk=alert_group_pk)
     except AlertGroup.DoesNotExist:
         logger.warning(f"Alert group {alert_group_pk} does not exist")
         return

@@ -322,7 +322,7 @@ def notify_user_async(user_pk, alert_group_pk, notification_policy_pk, critical)
         return

     try:
-        alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+        alert_group = AlertGroup.objects.get(pk=alert_group_pk)
     except AlertGroup.DoesNotExist:
         logger.warning(f"Alert group {alert_group_pk} does not exist")
         return
@@ -9,7 +9,7 @@ from common.api_helpers.mixins import EagerLoadingMixin
 class ResolutionNoteSerializer(EagerLoadingMixin, serializers.ModelSerializer):
     id = serializers.CharField(read_only=True, source="public_primary_key")
     alert_group_id = OrganizationFilteredPrimaryKeyRelatedField(
-        queryset=AlertGroup.unarchived_objects,
+        queryset=AlertGroup.objects,
         source="alert_group",
         filter_field="channel__organization",
     )
@@ -93,7 +93,7 @@ def incident_public_api_setup(

 @pytest.mark.django_db
 def test_get_incidents(incident_public_api_setup):
     token, _, _, _ = incident_public_api_setup
-    incidents = AlertGroup.unarchived_objects.all().order_by("-started_at")
+    incidents = AlertGroup.objects.all().order_by("-started_at")
     client = APIClient()
     expected_response = construct_expected_response_from_incidents(incidents)

@@ -110,7 +110,7 @@ def test_get_incidents_filter_by_integration(
 ):
     token, incidents, integrations, _ = incident_public_api_setup
     formatted_webhook = integrations[1]
-    incidents = AlertGroup.unarchived_objects.filter(channel=formatted_webhook).order_by("-started_at")
+    incidents = AlertGroup.objects.filter(channel=formatted_webhook).order_by("-started_at")
     expected_response = construct_expected_response_from_incidents(incidents)
     client = APIClient()

@@ -128,7 +128,7 @@ def test_get_incidents_filter_by_state_new(
     incident_public_api_setup,
 ):
     token, _, _, _ = incident_public_api_setup
-    incidents = AlertGroup.unarchived_objects.filter(AlertGroup.get_new_state_filter()).order_by("-started_at")
+    incidents = AlertGroup.objects.filter(AlertGroup.get_new_state_filter()).order_by("-started_at")
     expected_response = construct_expected_response_from_incidents(incidents)
     client = APIClient()

@@ -144,7 +144,7 @@ def test_get_incidents_filter_by_state_acknowledged(
     incident_public_api_setup,
 ):
     token, _, _, _ = incident_public_api_setup
-    incidents = AlertGroup.unarchived_objects.filter(AlertGroup.get_acknowledged_state_filter()).order_by("-started_at")
+    incidents = AlertGroup.objects.filter(AlertGroup.get_acknowledged_state_filter()).order_by("-started_at")
     expected_response = construct_expected_response_from_incidents(incidents)
     client = APIClient()

@@ -160,7 +160,7 @@ def test_get_incidents_filter_by_state_silenced(
     incident_public_api_setup,
 ):
     token, _, _, _ = incident_public_api_setup
-    incidents = AlertGroup.unarchived_objects.filter(AlertGroup.get_silenced_state_filter()).order_by("-started_at")
+    incidents = AlertGroup.objects.filter(AlertGroup.get_silenced_state_filter()).order_by("-started_at")
     expected_response = construct_expected_response_from_incidents(incidents)
     client = APIClient()

@@ -176,7 +176,7 @@ def test_get_incidents_filter_by_state_resolved(
     incident_public_api_setup,
 ):
     token, _, _, _ = incident_public_api_setup
-    incidents = AlertGroup.unarchived_objects.filter(AlertGroup.get_resolved_state_filter()).order_by("-started_at")
+    incidents = AlertGroup.objects.filter(AlertGroup.get_resolved_state_filter()).order_by("-started_at")
     expected_response = construct_expected_response_from_incidents(incidents)
     client = APIClient()

@@ -220,7 +220,7 @@ def test_get_incidents_filter_by_route(
 ):
     token, incidents, integrations, routes = incident_public_api_setup
     grafana_non_default_route = routes[1]
-    incidents = AlertGroup.unarchived_objects.filter(channel_filter=grafana_non_default_route).order_by("-started_at")
+    incidents = AlertGroup.objects.filter(channel_filter=grafana_non_default_route).order_by("-started_at")
    expected_response = construct_expected_response_from_incidents(incidents)
    client = APIClient()
@@ -50,7 +50,7 @@ class IncidentView(RateLimitHeadersMixin, mixins.ListModelMixin, mixins.DestroyM
         integration_id = self.request.query_params.get("integration_id", None)
         state = self.request.query_params.get("state", None)

-        queryset = AlertGroup.unarchived_objects.filter(
+        queryset = AlertGroup.objects.filter(
             channel__organization=self.request.auth.organization,
         ).order_by("-started_at")

@@ -84,7 +84,7 @@ class IncidentView(RateLimitHeadersMixin, mixins.ListModelMixin, mixins.DestroyM
         public_primary_key = self.kwargs["pk"]

         try:
-            return AlertGroup.unarchived_objects.filter(
+            return AlertGroup.objects.filter(
                 channel__organization=self.request.auth.organization,
             ).get(public_primary_key=public_primary_key)
         except AlertGroup.DoesNotExist:
@@ -85,7 +85,7 @@ def on_alert_group_action_triggered_async(log_record_id):
 )
 def on_alert_group_update_log_report_async(alert_group_id):
     AlertGroup = apps.get_model("alerts", "AlertGroup")
-    alert_group = AlertGroup.all_objects.get(pk=alert_group_id)
+    alert_group = AlertGroup.objects.get(pk=alert_group_id)
     logger.debug(f"Start on_alert_group_update_log_report for alert_group {alert_group_id}")
     organization = alert_group.channel.organization
     if alert_group.slack_message and organization.slack_team_identity:

@@ -164,7 +164,7 @@ class AlertGroupSlackRepresentative(AlertGroupAbstractRepresentative):
             alert_group_id = alert_group.pk
         else:
             alert_group_id = alert_group
             alert_group = AlertGroup.all_objects.get(pk=alert_group_id)

         logger.debug(
             f"Received alert_group_update_log_report signal in SLACK representative for alert_group {alert_group_id}"
@@ -64,7 +64,7 @@ class UpdateAppearanceStep(scenario_step.ScenarioStep):
         private_metadata = json.loads(payload["view"]["private_metadata"])
         alert_group_pk = private_metadata["alert_group_pk"]

-        alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+        alert_group = AlertGroup.objects.get(pk=alert_group_pk)

         attachments = alert_group.render_slack_attachments()
         blocks = alert_group.render_slack_blocks()
@@ -48,13 +48,13 @@ class AlertShootingStep(scenario_step.ScenarioStep):
         # do not try to post alert group message to slack if its channel is rate limited
         if alert.group.channel.is_rate_limited_in_slack:
             logger.info("Skip posting or updating alert_group in Slack due to rate limit")
-            AlertGroup.all_objects.filter(
+            AlertGroup.objects.filter(
                 pk=alert.group.pk,
                 slack_message_sent=False,
             ).update(slack_message_sent=True, reason_to_skip_escalation=AlertGroup.RATE_LIMITED)
             return

-        num_updated_rows = AlertGroup.all_objects.filter(pk=alert.group.pk, slack_message_sent=False).update(
+        num_updated_rows = AlertGroup.objects.filter(pk=alert.group.pk, slack_message_sent=False).update(
             slack_message_sent=True
         )

@@ -63,7 +63,7 @@ class AlertShootingStep(scenario_step.ScenarioStep):
                 channel_id = alert.group.channel_filter.slack_channel_id_or_general_log_id
                 self._send_first_alert(alert, channel_id)
             except SlackAPIException as e:
-                AlertGroup.all_objects.filter(pk=alert.group.pk).update(slack_message_sent=False)
+                AlertGroup.objects.filter(pk=alert.group.pk).update(slack_message_sent=False)
                 raise e

             if alert.group.channel.maintenance_mode == AlertReceiveChannel.DEBUG_MAINTENANCE:
@@ -373,7 +373,7 @@ class SelectAttachGroupStep(AlertGroupActionsMixin, scenario_step.ScenarioStep):
         ).values_list("id", flat=True)

         alert_groups_queryset = (
-            AlertGroup.unarchived_objects.prefetch_related(
+            AlertGroup.objects.prefetch_related(
                 "alerts",
                 "channel__organization",
             )

@@ -460,11 +460,11 @@ class AttachGroupStep(AlertGroupActionsMixin, scenario_step.ScenarioStep):
         # submit selection in modal window
         if payload["type"] == scenario_step.PAYLOAD_TYPE_VIEW_SUBMISSION:
             alert_group_pk = json.loads(payload["view"]["private_metadata"])["alert_group_pk"]
-            alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+            alert_group = AlertGroup.objects.get(pk=alert_group_pk)
             root_alert_group_pk = payload["view"]["state"]["values"][SelectAttachGroupStep.routing_uid()][
                 AttachGroupStep.routing_uid()
             ]["selected_option"]["value"]
-            root_alert_group = AlertGroup.all_objects.get(pk=root_alert_group_pk)
+            root_alert_group = AlertGroup.objects.get(pk=root_alert_group_pk)
         # old version of attach selection by dropdown
         else:
             try:

@@ -472,7 +472,7 @@ class AttachGroupStep(AlertGroupActionsMixin, scenario_step.ScenarioStep):
             except KeyError:
                 root_alert_group_pk = int(payload["actions"][0]["selected_option"]["value"])

-            root_alert_group = AlertGroup.all_objects.get(pk=root_alert_group_pk)
+            root_alert_group = AlertGroup.objects.get(pk=root_alert_group_pk)
             alert_group = self.get_alert_group(slack_team_identity, payload)

         alert_group.attach_by_user(self.user, root_alert_group, action_source=ActionSource.SLACK)
@@ -713,7 +713,7 @@ class AcknowledgeConfirmationStep(AcknowledgeGroupStep):
     def process_scenario(self, slack_user_identity, slack_team_identity, payload):
         AlertGroup = apps.get_model("alerts", "AlertGroup")
         alert_group_id = payload["actions"][0]["value"].split("_")[1]
-        alert_group = AlertGroup.all_objects.get(pk=alert_group_id)
+        alert_group = AlertGroup.objects.get(pk=alert_group_id)
         channel = payload["channel"]["id"]
         message_ts = payload["message_ts"]

@@ -229,7 +229,7 @@ def _get_selected_schedule_from_payload(payload):
 def _get_alert_group_from_payload(payload):
     AlertGroup = apps.get_model("alerts", "AlertGroup")
     alert_group_pk = json.loads(payload["view"]["private_metadata"])[ALERT_GROUP_DATA_KEY]
-    return AlertGroup.all_objects.get(pk=alert_group_pk)
+    return AlertGroup.objects.get(pk=alert_group_pk)


 STEPS_ROUTING = [
@@ -379,7 +379,7 @@ class ResolutionNoteModalStep(AlertGroupActionsMixin, scenario_step.ScenarioStep
         if data:
             # Argument "data" is used when step is called from other step, e.g. AddRemoveThreadMessageStep
             AlertGroup = apps.get_model("alerts", "AlertGroup")
-            alert_group = AlertGroup.all_objects.get(pk=data["alert_group_pk"])
+            alert_group = AlertGroup.objects.get(pk=data["alert_group_pk"])
         else:
             # Handle "Add Resolution notes" button click
             alert_group = self.get_alert_group(slack_team_identity, payload)

@@ -686,7 +686,7 @@ class AddRemoveThreadMessageStep(UpdateResolutionNoteStep, scenario_step.Scenari
         slack_thread_message = None
         resolution_note = None

-        alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+        alert_group = AlertGroup.objects.get(pk=alert_group_pk)

         if slack_message_pk is not None:
             slack_thread_message = ResolutionNoteSlackMessage.objects.get(pk=slack_message_pk)
@@ -31,9 +31,7 @@ class AlertGroupLogSlackRenderer:
         result = ""

         # check if escalation or invitation active
-        if not (
-            alert_group.resolved or alert_group.is_archived or alert_group.wiped_at or alert_group.root_alert_group
-        ):
+        if not (alert_group.resolved or alert_group.wiped_at or alert_group.root_alert_group):
             escalation_policies_plan = log_builder.get_incident_escalation_plan(for_slack=True)
             if escalation_policies_plan:
                 result += "\n:arrow_down: :arrow_down: :arrow_down: Plan:\n\n"
@@ -104,7 +104,7 @@ class AlertGroupActionsMixin:
         except (KeyError, TypeError):
             return None

-        return AlertGroup.all_objects.get(pk=alert_group_pk)
+        return AlertGroup.objects.get(pk=alert_group_pk)

     def _get_alert_group_from_message(self, payload: dict) -> AlertGroup | None:
         """

@@ -134,7 +134,7 @@ class AlertGroupActionsMixin:
             except (KeyError, TypeError):
                 continue

-            return AlertGroup.all_objects.get(pk=alert_group_pk)
+            return AlertGroup.objects.get(pk=alert_group_pk)
         return None

     def _get_alert_group_from_slack_message_in_db(
@@ -48,7 +48,7 @@ def update_incident_slack_message(slack_team_identity_pk, alert_group_pk):
     AlertGroup = apps.get_model("alerts", "AlertGroup")

     slack_team_identity = SlackTeamIdentity.objects.get(pk=slack_team_identity_pk)
-    alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+    alert_group = AlertGroup.objects.get(pk=alert_group_pk)

     if alert_group.skip_escalation_in_slack or alert_group.channel.is_rate_limited_in_slack:
         return "Skip message update in Slack due to rate limit"

@@ -73,7 +73,7 @@ def check_slack_message_exists_before_post_message_to_thread(
     AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
     EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")

-    alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+    alert_group = AlertGroup.objects.get(pk=alert_group_pk)
     slack_team_identity = alert_group.channel.organization.slack_team_identity
     # get escalation policy object if it exists to save it in log record
     escalation_policy = EscalationPolicy.objects.filter(pk=escalation_policy_pk).first()

@@ -144,7 +144,7 @@ def send_message_to_thread_if_bot_not_in_channel(alert_group_pk, slack_team_iden
     SlackTeamIdentity = apps.get_model("slack", "SlackTeamIdentity")

     slack_team_identity = SlackTeamIdentity.objects.get(pk=slack_team_identity_pk)
-    alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+    alert_group = AlertGroup.objects.get(pk=alert_group_pk)

     sc = SlackClientWithErrorHandling(slack_team_identity.bot_access_token)

@@ -268,7 +268,7 @@ def post_or_update_log_report_message_task(alert_group_pk, slack_team_identity_p
     UpdateLogReportMessageStep = ScenarioStep.get_step("distribute_alerts", "UpdateLogReportMessageStep")

     slack_team_identity = SlackTeamIdentity.objects.get(pk=slack_team_identity_pk)
-    alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+    alert_group = AlertGroup.objects.get(pk=alert_group_pk)
     step = UpdateLogReportMessageStep(slack_team_identity, alert_group.channel.organization)

     if alert_group.skip_escalation_in_slack or alert_group.channel.is_rate_limited_in_slack:
@@ -64,7 +64,7 @@ class AlertGroupTelegramRepresentative(AlertGroupAbstractRepresentative):
         logger.info("AlertGroupTelegramRepresentative UPDATE LOG REPORT SIGNAL")
         alert_group = kwargs["alert_group"]
         if not isinstance(alert_group, AlertGroup):
-            alert_group = AlertGroup.all_objects.get(pk=alert_group)
+            alert_group = AlertGroup.objects.get(pk=alert_group)

         messages_to_edit = alert_group.telegram_messages.filter(
             message_type__in=(

@@ -87,7 +87,7 @@ def send_link_to_channel_message_or_fallback_to_full_alert_group(

     try:
         user_connector = TelegramToUserConnector.objects.get(pk=user_connector_pk)
-        alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+        alert_group = AlertGroup.objects.get(pk=alert_group_pk)
         notification_policy = UserNotificationPolicy.objects.get(pk=notification_policy_pk)

         # probably telegram message just didn't appear in Telegram channel yet

@@ -65,7 +65,7 @@ class ButtonPressHandler(UpdateHandler):
         args = CallbackQueryFactory.decode_data(data)

         alert_group_pk = args[0]
-        alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
+        alert_group = AlertGroup.objects.get(pk=alert_group_pk)

         action_value = args[1]
         try:
@@ -30,7 +30,7 @@ ACTION_TO_TRIGGER_TYPE = {
 )
 def alert_group_created(self, alert_group_id):
     try:
-        alert_group = AlertGroup.unarchived_objects.get(pk=alert_group_id)
+        alert_group = AlertGroup.objects.get(pk=alert_group_id)
     except AlertGroup.DoesNotExist:
         return

@@ -50,7 +50,7 @@ def alert_group_created(self, alert_group_id):
 )
 def alert_group_status_change(self, action_type, alert_group_id, user_id):
     try:
-        alert_group = AlertGroup.unarchived_objects.get(pk=alert_group_id)
+        alert_group = AlertGroup.objects.get(pk=alert_group_id)
     except AlertGroup.DoesNotExist:
         return

@@ -177,7 +177,7 @@ def execute_webhook(webhook_pk, alert_group_id, user_id, escalation_policy_id):
         type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_SUCCESS,
     ).select_related("author")
     alert_group = (
-        AlertGroup.unarchived_objects.prefetch_related(
+        AlertGroup.objects.prefetch_related(
             Prefetch("personal_log_records", queryset=personal_log_records, to_attr="sent_notifications")
         )
         .select_related("channel")
@@ -49,7 +49,7 @@ def test_alert_group_created_for_team(

 @pytest.mark.django_db
 def test_alert_group_created_does_not_exist(make_organization, make_custom_webhook):
-    assert AlertGroup.all_objects.filter(pk=53).first() is None
+    assert AlertGroup.objects.filter(pk=53).first() is None
     organization = make_organization()
     # make sure there is a webhook setup
     make_custom_webhook(organization=organization, trigger_type=Webhook.TRIGGER_ALERT_GROUP_CREATED)

@@ -98,7 +98,7 @@ def test_alert_group_status_change(

 @pytest.mark.django_db
 def test_alert_group_status_change_does_not_exist(make_organization, make_custom_webhook):
-    assert AlertGroup.all_objects.filter(pk=53).first() is None
+    assert AlertGroup.objects.filter(pk=53).first() is None
     organization = make_organization()
     # make sure there is a webhook setup
     make_custom_webhook(organization=organization, trigger_type=Webhook.TRIGGER_ACKNOWLEDGE)