diff --git a/.env.example b/.env.example
index 529d3ce9..ad6128a2 100644
--- a/.env.example
+++ b/.env.example
@@ -6,7 +6,7 @@ SLACK_API_TOKEN=
SLACK_API_TOKEN_COMMON=
SLACK_SLASH_COMMAND_NAME=/oncall
-TELEGRAM_WEBHOOK_URL=
+TELEGRAM_WEBHOOK_HOST=
TELEGRAM_TOKEN=
TWILIO_ACCOUNT_SID=
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b31cf786..4d3ada7d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
# Change Log
+## v1.0.13 (2022-07-27)
+- Optimize alert group list view
+- Fix a bug related to Twilio setup
+
## v1.0.12 (2022-07-26)
- Update push-notifications dependency
- Rework how absolute URLs are built
diff --git a/README.md b/README.md
index 48803ef5..36e96a28 100644
--- a/README.md
+++ b/README.md
@@ -38,7 +38,7 @@ GRAFANA_PASSWORD=admin" > .env_hobby
3. Launch services:
```bash
-docker-compose --env-file .env_hobby -f docker-compose.yml up --build -d
+docker-compose --env-file .env_hobby -f docker-compose.yml up -d
```
4. Issue one-time invite token:
diff --git a/engine/apps/alerts/incident_appearance/renderers/base_renderer.py b/engine/apps/alerts/incident_appearance/renderers/base_renderer.py
index 234c8038..f18fd6a3 100644
--- a/engine/apps/alerts/incident_appearance/renderers/base_renderer.py
+++ b/engine/apps/alerts/incident_appearance/renderers/base_renderer.py
@@ -18,9 +18,12 @@ class AlertBaseRenderer(ABC):
class AlertGroupBaseRenderer(ABC):
- def __init__(self, alert_group):
+ def __init__(self, alert_group, alert=None):
+ if alert is None:
+ alert = alert_group.alerts.first()
+
self.alert_group = alert_group
- self.alert_renderer = self.alert_renderer_class(self.alert_group.alerts.first())
+ self.alert_renderer = self.alert_renderer_class(alert)
@property
@abstractmethod
diff --git a/engine/apps/alerts/incident_appearance/renderers/classic_markdown_renderer.py b/engine/apps/alerts/incident_appearance/renderers/classic_markdown_renderer.py
index 9759e86b..aa7a059e 100644
--- a/engine/apps/alerts/incident_appearance/renderers/classic_markdown_renderer.py
+++ b/engine/apps/alerts/incident_appearance/renderers/classic_markdown_renderer.py
@@ -20,11 +20,11 @@ class AlertClassicMarkdownRenderer(AlertBaseRenderer):
class AlertGroupClassicMarkdownRenderer(AlertGroupBaseRenderer):
- def __init__(self, alert_group):
- super().__init__(alert_group)
+ def __init__(self, alert_group, alert=None):
+ if alert is None:
+ alert = alert_group.alerts.last()
- # use the last alert to render content
- self.alert_renderer = self.alert_renderer_class(self.alert_group.alerts.last())
+ super().__init__(alert_group, alert)
@property
def alert_renderer_class(self):
diff --git a/engine/apps/alerts/incident_appearance/renderers/web_renderer.py b/engine/apps/alerts/incident_appearance/renderers/web_renderer.py
index e68d453c..681f94f5 100644
--- a/engine/apps/alerts/incident_appearance/renderers/web_renderer.py
+++ b/engine/apps/alerts/incident_appearance/renderers/web_renderer.py
@@ -20,11 +20,11 @@ class AlertWebRenderer(AlertBaseRenderer):
class AlertGroupWebRenderer(AlertGroupBaseRenderer):
- def __init__(self, alert_group):
- super().__init__(alert_group)
+ def __init__(self, alert_group, alert=None):
+ if alert is None:
+ alert = alert_group.alerts.last()
- # use the last alert to render content
- self.alert_renderer = self.alert_renderer_class(self.alert_group.alerts.last())
+ super().__init__(alert_group, alert)
@property
def alert_renderer_class(self):
diff --git a/engine/apps/alerts/migrations/0006_alertgroup_alerts_aler_channel_ee84a7_idx.py b/engine/apps/alerts/migrations/0006_alertgroup_alerts_aler_channel_ee84a7_idx.py
new file mode 100644
index 00000000..ada7b0da
--- /dev/null
+++ b/engine/apps/alerts/migrations/0006_alertgroup_alerts_aler_channel_ee84a7_idx.py
@@ -0,0 +1,17 @@
+# Generated by Django 3.2.13 on 2022-07-27 10:51
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('alerts', '0005_alertgroup_cached_render_for_web'),
+ ]
+
+ operations = [
+ migrations.AddIndex(
+ model_name='alertgroup',
+ index=models.Index(fields=['channel_id', 'resolved', 'acknowledged', 'silenced', 'root_alert_group_id', 'is_archived'], name='alerts_aler_channel_ee84a7_idx'),
+ ),
+ ]
diff --git a/engine/apps/alerts/models/alert.py b/engine/apps/alerts/models/alert.py
index 3e08e7b9..e5bd504d 100644
--- a/engine/apps/alerts/models/alert.py
+++ b/engine/apps/alerts/models/alert.py
@@ -5,7 +5,7 @@ from uuid import uuid4
from django.apps import apps
from django.conf import settings
from django.core.validators import MinLengthValidator
-from django.db import models, transaction
+from django.db import models
from django.db.models import JSONField
from django.db.models.signals import post_save
@@ -261,9 +261,6 @@ def listen_for_alert_model_save(sender, instance, created, *args, **kwargs):
else:
distribute_alert.apply_async((instance.pk,), countdown=TASK_DELAY_SECONDS)
- logger.info(f"Recalculate AG cache. Reason: save alert model {instance.pk}")
- transaction.on_commit(instance.group.schedule_cache_for_web)
-
# Connect signal to base Alert class
post_save.connect(listen_for_alert_model_save, Alert)
diff --git a/engine/apps/alerts/models/alert_group.py b/engine/apps/alerts/models/alert_group.py
index 16b2d19b..84a0a9aa 100644
--- a/engine/apps/alerts/models/alert_group.py
+++ b/engine/apps/alerts/models/alert_group.py
@@ -8,12 +8,9 @@ import pytz
from celery import uuid as celery_uuid
from django.apps import apps
from django.conf import settings
-from django.core.cache import cache
from django.core.validators import MinLengthValidator
-from django.db import IntegrityError, models, transaction
+from django.db import IntegrityError, models
from django.db.models import JSONField, Q, QuerySet
-from django.db.models.signals import post_save
-from django.dispatch import receiver
from django.utils import timezone
from django.utils.functional import cached_property
@@ -22,16 +19,9 @@ from apps.alerts.incident_appearance.renderers.constants import DEFAULT_BACKUP_T
from apps.alerts.incident_appearance.renderers.slack_renderer import AlertGroupSlackRenderer
from apps.alerts.incident_log_builder import IncidentLogBuilder
from apps.alerts.signals import alert_group_action_triggered_signal
-from apps.alerts.tasks import (
- acknowledge_reminder_task,
- call_ack_url,
- schedule_cache_for_alert_group,
- send_alert_group_signal,
- unsilence_task,
-)
+from apps.alerts.tasks import acknowledge_reminder_task, call_ack_url, send_alert_group_signal, unsilence_task
from apps.slack.slack_formatter import SlackFormatter
from apps.user_management.models import User
-from common.mixins.use_random_readonly_db_manager_mixin import UseRandomReadonlyDbManagerMixin
from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
from common.utils import clean_markup, str_or_backup
@@ -108,10 +98,6 @@ class UnarchivedAlertGroupQuerySet(models.QuerySet):
return super().filter(*args, **kwargs, is_archived=False)
-class AlertGroupManager(UseRandomReadonlyDbManagerMixin, models.Manager):
- pass
-
-
class AlertGroupSlackRenderingMixin:
"""
Ideally this mixin should not exist. Instead of this instance of AlertGroupSlackRenderer should be created and used
@@ -134,8 +120,8 @@ class AlertGroupSlackRenderingMixin:
class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.Model):
- all_objects = AlertGroupManager.from_queryset(AlertGroupQuerySet)()
- unarchived_objects = AlertGroupManager.from_queryset(UnarchivedAlertGroupQuerySet)()
+ all_objects = AlertGroupQuerySet.as_manager()
+ unarchived_objects = UnarchivedAlertGroupQuerySet.as_manager()
(
NEW,
@@ -242,8 +228,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
active_escalation_id = models.CharField(max_length=100, null=True, default=None) # ID generated by celery
active_resolve_calculation_id = models.CharField(max_length=100, null=True, default=None) # ID generated by celery
- # ID generated by celery
- active_cache_for_web_calculation_id = models.CharField(max_length=100, null=True, default=None)
SILENCE_DELAY_OPTIONS = (
(1800, "30 minutes"),
@@ -315,7 +299,9 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
related_name="dependent_alert_groups",
)
- cached_render_for_web = JSONField(default=dict)
+ # cached_render_for_web and active_cache_for_web_calculation_id are deprecated
+ cached_render_for_web = models.JSONField(default=dict)
+ active_cache_for_web_calculation_id = models.CharField(max_length=100, null=True, default=None)
last_unique_unacknowledge_process_id = models.CharField(max_length=100, null=True, default=None)
is_archived = models.BooleanField(default=False)
@@ -364,6 +350,11 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
"distinction",
"is_open_for_grouping",
]
+ indexes = [
+ models.Index(
+ fields=["channel_id", "resolved", "acknowledged", "silenced", "root_alert_group_id", "is_archived"]
+ ),
+ ]
def __str__(self):
return f"{self.pk}: {self.verbose_name}"
@@ -404,18 +395,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
def is_alert_a_resolve_signal(self, alert):
raise NotImplementedError
- def cache_for_web(self, organization):
- from apps.api.serializers.alert_group import AlertGroupSerializer
-
- # Re-take object to switch connection from readonly db to master.
- _self = AlertGroup.all_objects.get(pk=self.pk)
- _self.cached_render_for_web = AlertGroupSerializer(self, context={"organization": organization}).data
- self.cached_render_for_web = _self.cached_render_for_web
- _self.save(update_fields=["cached_render_for_web"])
-
- def schedule_cache_for_web(self):
- schedule_cache_for_alert_group.apply_async((self.pk,))
-
@property
def permalink(self):
if self.slack_message is not None:
@@ -425,10 +404,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
def web_link(self):
return urljoin(self.channel.organization.web_link, f"?page=incident&id={self.public_primary_key}")
- @property
- def alerts_count(self):
- return self.alerts.count()
-
@property
def happened_while_maintenance(self):
return self.root_alert_group is not None and self.root_alert_group.maintenance_uuid is not None
@@ -449,10 +424,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
self.unresolve()
self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_RESOLVED, author=user, reason="Acknowledge button")
- # clear resolve report cache
- cache_key = "render_after_resolve_report_json_{}".format(self.pk)
- cache.delete(cache_key)
-
self.acknowledge(acknowledged_by_user=user, acknowledged_by=AlertGroup.USER)
self.stop_escalation()
if self.is_root_alert_group:
@@ -673,9 +644,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
self.unresolve()
log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_RESOLVED, author=user)
- # clear resolve report cache
- self.drop_cached_after_resolve_report_json()
-
if self.is_root_alert_group:
self.start_escalation_if_needed()
@@ -848,10 +816,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
self.unresolve()
self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_RESOLVED, author=user, reason="Silence button")
- # clear resolve report cache
- cache_key = "render_after_resolve_report_json_{}".format(self.pk)
- cache.delete(cache_key)
-
if self.acknowledged:
self.unacknowledge()
self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_ACK, author=user, reason="Silence button")
@@ -1060,8 +1024,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
author=user,
reason="Bulk action acknowledge",
)
- # clear resolve report cache
- alert_group.drop_cached_after_resolve_report_json()
for alert_group in alert_groups_to_unsilence_before_acknowledge_list:
alert_group.log_records.create(
@@ -1194,8 +1156,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
reason="Bulk action restart",
)
- alert_group.drop_cached_after_resolve_report_json()
-
if alert_group.is_root_alert_group:
alert_group.start_escalation_if_needed()
@@ -1293,7 +1253,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
author=user,
reason="Bulk action silence",
)
- alert_group.drop_cached_after_resolve_report_json()
for alert_group in alert_groups_to_unsilence_before_silence_list:
alert_group.log_records.create(
@@ -1483,7 +1442,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
else:
return "Acknowledged"
- def non_cached_after_resolve_report_json(self):
+ def render_after_resolve_report_json(self):
AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
ResolutionNote = apps.get_model("alerts", "ResolutionNote")
@@ -1501,21 +1460,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
result_log_report.append(log_record.render_log_line_json())
return result_log_report
- def render_after_resolve_report_json(self):
- cache_key = "render_after_resolve_report_json_{}".format(self.pk)
-
- # cache.get_or_set in some cases returns None, so use get and set cache methods separately
- log_report = cache.get(cache_key)
- if log_report is None:
- log_report = self.non_cached_after_resolve_report_json()
- cache.set(cache_key, log_report)
- return log_report
-
- def drop_cached_after_resolve_report_json(self):
- cache_key = "render_after_resolve_report_json_{}".format(self.pk)
- if cache_key in cache:
- cache.delete(cache_key)
-
@property
def has_resolution_notes(self):
return self.resolution_notes.exists()
@@ -1595,14 +1539,3 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
)
return stop_escalation_log
-
-
-@receiver(post_save, sender=AlertGroup)
-def listen_for_alert_group_model_save(sender, instance, created, *args, **kwargs):
- if (
- kwargs is not None
- and "update_fields" in kwargs
- and kwargs["update_fields"] is dict
- and "cached_render_for_web" not in kwargs["update_fields"]
- ):
- transaction.on_commit(instance.schedule_cache_for_alert_group)
diff --git a/engine/apps/alerts/models/alert_group_log_record.py b/engine/apps/alerts/models/alert_group_log_record.py
index 7e5e30c9..c2bacc7d 100644
--- a/engine/apps/alerts/models/alert_group_log_record.py
+++ b/engine/apps/alerts/models/alert_group_log_record.py
@@ -3,7 +3,7 @@ import logging
import humanize
from django.apps import apps
-from django.db import models, transaction
+from django.db import models
from django.db.models import JSONField
from django.db.models.signals import post_save
from django.dispatch import receiver
@@ -546,7 +546,6 @@ class AlertGroupLogRecord(models.Model):
@receiver(post_save, sender=AlertGroupLogRecord)
def listen_for_alertgrouplogrecord(sender, instance, created, *args, **kwargs):
- instance.alert_group.drop_cached_after_resolve_report_json()
if instance.type != AlertGroupLogRecord.TYPE_DELETED:
if not instance.alert_group.is_maintenance_incident:
alert_group_pk = instance.alert_group.pk
@@ -555,6 +554,3 @@ def listen_for_alertgrouplogrecord(sender, instance, created, *args, **kwargs):
f"alert group event: {instance.get_type_display()}"
)
send_update_log_report_signal.apply_async(kwargs={"alert_group_pk": alert_group_pk}, countdown=8)
-
- logger.info(f"Recalculate AG cache. Reason: save alert_group_log_record model {instance.pk}")
- transaction.on_commit(instance.alert_group.schedule_cache_for_web)
diff --git a/engine/apps/alerts/models/alert_receive_channel.py b/engine/apps/alerts/models/alert_receive_channel.py
index 2f0cc016..643f737e 100644
--- a/engine/apps/alerts/models/alert_receive_channel.py
+++ b/engine/apps/alerts/models/alert_receive_channel.py
@@ -19,11 +19,7 @@ from jinja2 import Template
from apps.alerts.grafana_alerting_sync_manager.grafana_alerting_sync import GrafanaAlertingSyncManager
from apps.alerts.integration_options_mixin import IntegrationOptionsMixin
from apps.alerts.models.maintainable_object import MaintainableObject
-from apps.alerts.tasks import (
- disable_maintenance,
- invalidate_web_cache_for_alert_group,
- sync_grafana_alerting_contact_points,
-)
+from apps.alerts.tasks import disable_maintenance, sync_grafana_alerting_contact_points
from apps.base.messaging import get_messaging_backend_from_id
from apps.base.utils import live_settings
from apps.integrations.metadata import heartbeat
@@ -693,21 +689,6 @@ def listen_for_alertreceivechannel_model_save(sender, instance, created, *args,
create_organization_log(
instance.organization, None, OrganizationLogType.TYPE_HEARTBEAT_CREATED, description
)
- else:
- if kwargs is not None:
- if "update_fields" in kwargs:
- if kwargs["update_fields"] is not None:
- fields_to_not_to_invalidate_cache = [
- "rate_limit_message_task_id",
- "rate_limited_in_slack_at",
- "reason_to_skip_escalation",
- ]
- # Hack to not to invalidate web cache on AlertReceiveChannel.start_send_rate_limit_message_task
- for f in fields_to_not_to_invalidate_cache:
- if f in kwargs["update_fields"]:
- return
- logger.info(f"Drop AG cache. Reason: save alert_receive_channel {instance.pk}")
- invalidate_web_cache_for_alert_group.apply_async(kwargs={"channel_pk": instance.pk})
if instance.integration == AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING:
if created:
diff --git a/engine/apps/alerts/tasks/__init__.py b/engine/apps/alerts/tasks/__init__.py
index 3ff8501e..79b8b0ed 100644
--- a/engine/apps/alerts/tasks/__init__.py
+++ b/engine/apps/alerts/tasks/__init__.py
@@ -9,7 +9,7 @@ from .custom_button_result import custom_button_result # noqa: F401
from .delete_alert_group import delete_alert_group # noqa: F401
from .distribute_alert import distribute_alert # noqa: F401
from .escalate_alert_group import escalate_alert_group # noqa: F401
-from .invalidate_web_cache_for_alert_group import invalidate_web_cache_for_alert_group # noqa: F401
+from .invalidate_web_cache_for_alert_group import invalidate_web_cache_for_alert_group  # noqa: F401  # TODO: remove
from .invite_user_to_join_incident import invite_user_to_join_incident # noqa: F401
from .maintenance import disable_maintenance # noqa: F401
from .notify_all import notify_all_task # noqa: F401
diff --git a/engine/apps/alerts/tasks/cache_alert_group_for_web.py b/engine/apps/alerts/tasks/cache_alert_group_for_web.py
index 677e0a19..5f0c52d5 100644
--- a/engine/apps/alerts/tasks/cache_alert_group_for_web.py
+++ b/engine/apps/alerts/tasks/cache_alert_group_for_web.py
@@ -1,54 +1,19 @@
-from celery.utils.log import get_task_logger
-from django.apps import apps
from django.conf import settings
-from django.core.cache import cache
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
-logger = get_task_logger(__name__)
-
-
-def get_cache_key_caching_alert_group_for_web(alert_group_pk):
- CACHE_KEY_PREFIX = "cache_alert_group_for_web"
- return f"{CACHE_KEY_PREFIX}_{alert_group_pk}"
-
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
)
def schedule_cache_for_alert_group(alert_group_pk):
- CACHE_FOR_ALERT_GROUP_LIFETIME = 60
- START_CACHE_DELAY = 5 # we introduce delay to avoid recaching after each alert.
-
- task = cache_alert_group_for_web.apply_async(args=[alert_group_pk], countdown=START_CACHE_DELAY)
- cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
- cache.set(cache_key, task.id, timeout=CACHE_FOR_ALERT_GROUP_LIFETIME)
+ # todo: remove
+ pass
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
)
def cache_alert_group_for_web(alert_group_pk):
- """
- Async task to re-cache alert_group for web.
- """
- cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
- cached_task_id = cache.get(cache_key)
- current_task_id = cache_alert_group_for_web.request.id
-
- if cached_task_id is None:
- return (
- f"cache_alert_group_for_web skipped, because of current task_id ({current_task_id})"
- f" for alert_group {alert_group_pk} doesn't exist in cache, which means this task is not"
- f" relevant: cache was dropped by engine restart ot CACHE_FOR_ALERT_GROUP_LIFETIME"
- )
- if not current_task_id == cached_task_id or cached_task_id is None:
- return (
- f"cache_alert_group_for_web skipped, because of current task_id ({current_task_id})"
- f" doesn't equal to cached task_id ({cached_task_id}) for alert_group {alert_group_pk},"
- )
- else:
- AlertGroup = apps.get_model("alerts", "AlertGroup")
- alert_group = AlertGroup.all_objects.using_readonly_db.get(pk=alert_group_pk)
- alert_group.cache_for_web(alert_group.channel.organization)
- logger.info(f"cache_alert_group_for_web: cache refreshed for alert_group {alert_group_pk}")
+ # todo: remove
+ pass
diff --git a/engine/apps/alerts/tasks/invalidate_web_cache_for_alert_group.py b/engine/apps/alerts/tasks/invalidate_web_cache_for_alert_group.py
index d9c7c4f9..9c8786d9 100644
--- a/engine/apps/alerts/tasks/invalidate_web_cache_for_alert_group.py
+++ b/engine/apps/alerts/tasks/invalidate_web_cache_for_alert_group.py
@@ -1,32 +1,11 @@
-from django.apps import apps
from django.conf import settings
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
-from .task_logger import task_logger
-
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
)
def invalidate_web_cache_for_alert_group(org_pk=None, channel_pk=None, alert_group_pk=None, alert_group_pks=None):
- AlertGroup = apps.get_model("alerts", "AlertGroup")
- DynamicSetting = apps.get_model("base", "DynamicSetting")
-
- if channel_pk:
- task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - alert_receive_channel {channel_pk}")
- q = AlertGroup.all_objects.filter(channel__pk=channel_pk)
- elif org_pk:
- task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - organization {org_pk}")
- q = AlertGroup.all_objects.filter(channel__organization__pk=org_pk)
- elif alert_group_pk:
- task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - alert_group {alert_group_pk}")
- q = AlertGroup.all_objects.filter(pk=alert_group_pk)
- elif alert_group_pks:
- task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - alert_groups {alert_group_pks}")
- q = AlertGroup.all_objects.filter(pk__in=alert_group_pks)
-
- skip_task = DynamicSetting.objects.get_or_create(name="skip_invalidate_web_cache_for_alert_group")[0]
- if skip_task.boolean_value:
- return "Task has been skipped because of skip_invalidate_web_cache_for_alert_group DynamicSetting"
- q.update(cached_render_for_web={})
+ # todo: remove
+ pass
diff --git a/engine/apps/alerts/tasks/notify_user.py b/engine/apps/alerts/tasks/notify_user.py
index 47d232c9..57d902b2 100644
--- a/engine/apps/alerts/tasks/notify_user.py
+++ b/engine/apps/alerts/tasks/notify_user.py
@@ -386,9 +386,7 @@ def perform_notification(log_record_pk):
"status": f"{alert_group.status}",
"aps": {
"alert": f"Critical page: {message}",
- # This is disabled until we gain the Critical Alerts Api permission from apple
- # "interruption-level": "critical",
- "interruption-level": "time-sensitive",
+ "interruption-level": "critical",
"sound": "ambulance.aiff",
},
},
diff --git a/engine/apps/alerts/tests/test_terraform_renderer.py b/engine/apps/alerts/tests/test_terraform_renderer.py
index 1661d89c..ca008db8 100644
--- a/engine/apps/alerts/tests/test_terraform_renderer.py
+++ b/engine/apps/alerts/tests/test_terraform_renderer.py
@@ -138,7 +138,7 @@ def test_render_terraform_imports(
result = renderer.render_state()
expected_result = rendered_terraform_imports_template.format(
- escalation_chain_name=escalation_chain.name,
+ escalation_chain_name=slugify(escalation_chain.name),
escalation_chain_public_primary_key=escalation_chain.public_primary_key,
integration_name=slugify(integration.verbal_name),
integration_public_primary_key=integration.public_primary_key,
diff --git a/engine/apps/alerts/utils.py b/engine/apps/alerts/utils.py
index fc757b6b..58ba22ea 100644
--- a/engine/apps/alerts/utils.py
+++ b/engine/apps/alerts/utils.py
@@ -6,6 +6,8 @@ from urllib.parse import urlparse
import requests
+from apps.base.utils import live_settings
+
OUTGOING_WEBHOOK_TIMEOUT = 10
@@ -52,13 +54,15 @@ def request_outgoing_webhook(webhook_url, http_request_type, post_kwargs={}) ->
return False, "Malformed url"
if not parsed_url.netloc:
return False, "Malformed url"
- # Get the ip address of the webhook url and check if it belongs to the private network
- try:
- webhook_url_ip_address = socket.gethostbyname(parsed_url.netloc)
- except socket.gaierror:
- return False, "Cannot resolve name in url"
- if ipaddress.ip_address(socket.gethostbyname(webhook_url_ip_address)).is_private:
- return False, "This url is not supported for outgoing webhooks"
+ if not live_settings.DANGEROUS_WEBHOOKS_ENABLED:
+ # Get the ip address of the webhook url and check if it belongs to the private network
+ try:
+ webhook_url_ip_address = socket.gethostbyname(parsed_url.netloc)
+ except socket.gaierror:
+ return False, "Cannot resolve name in url"
+ if not live_settings.DANGEROUS_WEBHOOKS_ENABLED:
+ if ipaddress.ip_address(socket.gethostbyname(webhook_url_ip_address)).is_private:
+ return False, "This url is not supported for outgoing webhooks"
try:
if http_request_type == "POST":
diff --git a/engine/apps/api/serializers/alert_group.py b/engine/apps/api/serializers/alert_group.py
index a9e5f9f2..df5583c4 100644
--- a/engine/apps/api/serializers/alert_group.py
+++ b/engine/apps/api/serializers/alert_group.py
@@ -1,7 +1,5 @@
import logging
-from datetime import datetime
-import humanize
from rest_framework import serializers
from apps.alerts.incident_appearance.renderers.classic_markdown_renderer import AlertGroupClassicMarkdownRenderer
@@ -29,51 +27,31 @@ class ShortAlertGroupSerializer(serializers.ModelSerializer):
return AlertGroupWebRenderer(obj).render()
-class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
- """
- Attention: It's heavily cached. Make sure to invalidate alertgroup's web cache if you update the format!
- """
-
+class AlertGroupListSerializer(EagerLoadingMixin, serializers.ModelSerializer):
pk = serializers.CharField(read_only=True, source="public_primary_key")
alert_receive_channel = FastAlertReceiveChannelSerializer(source="channel")
- alerts = serializers.SerializerMethodField("get_limited_alerts")
- resolved_by_verbose = serializers.CharField(source="get_resolved_by_display")
+ status = serializers.ReadOnlyField()
resolved_by_user = FastUserSerializer(required=False)
acknowledged_by_user = FastUserSerializer(required=False)
silenced_by_user = FastUserSerializer(required=False)
related_users = serializers.SerializerMethodField()
-
- last_alert_at = serializers.SerializerMethodField()
-
- started_at_verbose = serializers.SerializerMethodField()
- acknowledged_at_verbose = serializers.SerializerMethodField()
- resolved_at_verbose = serializers.SerializerMethodField()
- silenced_at_verbose = serializers.SerializerMethodField()
-
dependent_alert_groups = ShortAlertGroupSerializer(many=True)
root_alert_group = ShortAlertGroupSerializer()
- alerts_count = serializers.ReadOnlyField()
-
- status = serializers.ReadOnlyField()
+ alerts_count = serializers.IntegerField(read_only=True)
render_for_web = serializers.SerializerMethodField()
render_for_classic_markdown = serializers.SerializerMethodField()
PREFETCH_RELATED = [
- "alerts",
"dependent_alert_groups",
- "log_records",
"log_records__author",
- "log_records__escalation_policy",
- "log_records__invitation__invitee",
]
SELECT_RELATED = [
- "slack_message",
"channel__organization",
- "slack_message___slack_team_identity",
- "acknowledged_by_user",
+ "root_alert_group",
"resolved_by_user",
+ "acknowledged_by_user",
"silenced_by_user",
]
@@ -87,7 +65,6 @@ class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
"alert_receive_channel",
"resolved",
"resolved_by",
- "resolved_by_verbose",
"resolved_by_user",
"resolved_at",
"acknowledged_at",
@@ -98,48 +75,30 @@ class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
"silenced",
"silenced_by_user",
"silenced_at",
- "silenced_at_verbose",
"silenced_until",
"started_at",
- "last_alert_at",
"silenced_until",
- "permalink",
- "alerts",
"related_users",
- "started_at_verbose",
- "acknowledged_at_verbose",
- "resolved_at_verbose",
"render_for_web",
- "render_after_resolve_report_json",
"render_for_classic_markdown",
"dependent_alert_groups",
"root_alert_group",
"status",
]
- def get_last_alert_at(self, obj):
- last_alert = obj.alerts.last()
- # TODO: This is a Hotfix for 0.0.27
- if last_alert is None:
- logger.warning(f"obj {obj} doesn't have last_alert!")
- return ""
- return str(last_alert.created_at)
+ def get_render_for_web(self, obj):
+ # alert group has no alerts
+ if not obj.last_alert:
+ return {}
- def get_limited_alerts(self, obj):
- """
- Overriding default alerts because there are alert_groups with thousands of them.
- It's just too slow, we need to cut here.
- """
- alerts = obj.alerts.all()[:100]
-
- if len(alerts) > 90:
- for alert in alerts:
- alert.title = str(alert.title) + " Only last 100 alerts are shown. Use Amixr API to fetch all of them."
-
- return AlertSerializer(alerts, many=True).data
+ return AlertGroupWebRenderer(obj, obj.last_alert).render()
def get_render_for_classic_markdown(self, obj):
- return AlertGroupClassicMarkdownRenderer(obj).render()
+ # alert group has no alerts
+ if not obj.last_alert:
+ return {}
+
+ return AlertGroupClassicMarkdownRenderer(obj, obj.last_alert).render()
def get_related_users(self, obj):
users_ids = set()
@@ -165,37 +124,44 @@ class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
users_ids.add(log_record.author.public_primary_key)
return users
- def get_started_at_verbose(self, obj):
- started_at_verbose = None
- if obj.started_at is not None:
- started_at_verbose = humanize.naturaltime(
- datetime.now().replace(tzinfo=None) - obj.started_at.replace(tzinfo=None)
- )
- return started_at_verbose
- def get_acknowledged_at_verbose(self, obj):
- acknowledged_at_verbose = None
- if obj.acknowledged_at is not None:
- acknowledged_at_verbose = humanize.naturaltime(
- datetime.now().replace(tzinfo=None) - obj.acknowledged_at.replace(tzinfo=None)
- ) # TODO: Deal with timezones
- return acknowledged_at_verbose
+class AlertGroupSerializer(AlertGroupListSerializer):
+ alerts = serializers.SerializerMethodField("get_limited_alerts")
+ last_alert_at = serializers.SerializerMethodField()
- def get_resolved_at_verbose(self, obj):
- resolved_at_verbose = None
- if obj.resolved_at is not None:
- resolved_at_verbose = humanize.naturaltime(
- datetime.now().replace(tzinfo=None) - obj.resolved_at.replace(tzinfo=None)
- ) # TODO: Deal with timezones
- return resolved_at_verbose
-
- def get_silenced_at_verbose(self, obj):
- silenced_at_verbose = None
- if obj.silenced_at is not None:
- silenced_at_verbose = humanize.naturaltime(
- datetime.now().replace(tzinfo=None) - obj.silenced_at.replace(tzinfo=None)
- ) # TODO: Deal with timezones
- return silenced_at_verbose
+ class Meta(AlertGroupListSerializer.Meta):
+ fields = AlertGroupListSerializer.Meta.fields + [
+ "alerts",
+ "render_after_resolve_report_json",
+ "permalink",
+ "last_alert_at",
+ ]
def get_render_for_web(self, obj):
+ # alert group has no alerts
+ alert = obj.alerts.last()
+ if not alert:
+ return {}
+
return AlertGroupWebRenderer(obj).render()
+
+ def get_last_alert_at(self, obj):
+ last_alert = obj.alerts.last()
+
+ if not last_alert:
+ return obj.started_at
+
+ return last_alert.created_at
+
+ def get_limited_alerts(self, obj):
+ """
+ Overriding default alerts because there are alert_groups with thousands of them.
+ It's just too slow, we need to cut here.
+ """
+ alerts = obj.alerts.all()[:100]
+
+ if len(alerts) > 90:
+ for alert in alerts:
+ alert.title = str(alert.title) + " Only last 100 alerts are shown. Use OnCall API to fetch all of them."
+
+ return AlertSerializer(alerts, many=True).data
diff --git a/engine/apps/api/serializers/resolution_note.py b/engine/apps/api/serializers/resolution_note.py
index 330259e3..00178685 100644
--- a/engine/apps/api/serializers/resolution_note.py
+++ b/engine/apps/api/serializers/resolution_note.py
@@ -1,7 +1,6 @@
from rest_framework import serializers
from apps.alerts.models import AlertGroup, ResolutionNote
-from apps.alerts.tasks import invalidate_web_cache_for_alert_group
from apps.api.serializers.user import FastUserSerializer
from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField
from common.api_helpers.exceptions import BadRequest
@@ -39,9 +38,6 @@ class ResolutionNoteSerializer(EagerLoadingMixin, serializers.ModelSerializer):
validated_data["author"] = self.context["request"].user
validated_data["source"] = ResolutionNote.Source.WEB
created_instance = super().create(validated_data)
- # Invalidate alert group cache because resolution notes shown in alert group's timeline
- created_instance.alert_group.drop_cached_after_resolve_report_json()
- invalidate_web_cache_for_alert_group(alert_group_pk=created_instance.alert_group.pk)
return created_instance
def to_representation(self, instance):
@@ -57,8 +53,5 @@ class ResolutionNoteUpdateSerializer(ResolutionNoteSerializer):
def update(self, instance, validated_data):
if instance.source != ResolutionNote.Source.WEB:
raise BadRequest(detail="Cannot update message with this source type")
- updated_instance = super().update(instance, validated_data)
- # Invalidate alert group cache because resolution notes shown in alert group's timeline
- updated_instance.alert_group.drop_cached_after_resolve_report_json()
- invalidate_web_cache_for_alert_group(alert_group_pk=updated_instance.alert_group.pk)
- return updated_instance
+
+ return super().update(instance, validated_data)
diff --git a/engine/apps/api/tasks.py b/engine/apps/api/tasks.py
deleted file mode 100644
index 4240178a..00000000
--- a/engine/apps/api/tasks.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from celery.utils.log import get_task_logger
-from django.apps import apps
-from django.conf import settings
-from django.core.cache import cache
-
-from common.custom_celery_tasks import shared_dedicated_queue_retry_task
-
-logger = get_task_logger(__name__)
-
-
-def get_cache_key_caching_alert_group_for_web(alert_group_pk):
- CACHE_KEY_PREFIX = "cache_alert_group_for_web"
- return f"{CACHE_KEY_PREFIX}_{alert_group_pk}"
-
-
-# TODO: remove this tasks after all of them will be processed in prod
-@shared_dedicated_queue_retry_task(
- autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
-)
-def schedule_cache_for_alert_group(alert_group_pk):
- CACHE_FOR_ALERT_GROUP_LIFETIME = 60
- START_CACHE_DELAY = 5 # we introduce delay to avoid recaching after each alert.
-
- task = cache_alert_group_for_web.apply_async(args=[alert_group_pk], countdown=START_CACHE_DELAY)
- cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
- cache.set(cache_key, task.id, timeout=CACHE_FOR_ALERT_GROUP_LIFETIME)
-
-
-@shared_dedicated_queue_retry_task(
- autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
-)
-def cache_alert_group_for_web(alert_group_pk):
- """
- Async task to re-cache alert_group for web.
- """
- cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
- cached_task_id = cache.get(cache_key)
- current_task_id = cache_alert_group_for_web.request.id
-
- if cached_task_id is None:
- return (
- f"cache_alert_group_for_web skipped, because of current task_id ({current_task_id})"
- f" for alert_group {alert_group_pk} doesn't exist in cache, which means this task is not"
- f" relevant: cache was dropped by engine restart ot CACHE_FOR_ALERT_GROUP_LIFETIME"
- )
- if not current_task_id == cached_task_id or cached_task_id is None:
- return (
- f"cache_alert_group_for_web skipped, because of current task_id ({current_task_id})"
- f" doesn't equal to cached task_id ({cached_task_id}) for alert_group {alert_group_pk},"
- )
- else:
- AlertGroup = apps.get_model("alerts", "AlertGroup")
- alert_group = AlertGroup.all_objects.using_readonly_db.get(pk=alert_group_pk)
- alert_group.cache_for_web(alert_group.channel.organization)
- logger.info(f"cache_alert_group_for_web: cache refreshed for alert_group {alert_group_pk}")
diff --git a/engine/apps/api/tests/test_alert_group.py b/engine/apps/api/tests/test_alert_group.py
index 983a22bf..6d4a0b9e 100644
--- a/engine/apps/api/tests/test_alert_group.py
+++ b/engine/apps/api/tests/test_alert_group.py
@@ -63,7 +63,7 @@ def test_get_filter_started_at(alert_group_internal_api_setup, make_user_auth_he
)
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 4
+ assert len(response.data["results"]) == 4
@pytest.mark.django_db
@@ -78,7 +78,7 @@ def test_get_filter_resolved_at_alertgroup_empty_result(alert_group_internal_api
**make_user_auth_headers(user, token),
)
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 0
+ assert len(response.data["results"]) == 0
@pytest.mark.django_db
@@ -105,7 +105,7 @@ def test_get_filter_resolved_at(alert_group_internal_api_setup, make_user_auth_h
**make_user_auth_headers(user, token),
)
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 1
+ assert len(response.data["results"]) == 1
@pytest.mark.django_db
@@ -117,7 +117,7 @@ def test_status_new(alert_group_internal_api_setup, make_user_auth_headers):
url = reverse("api-internal:alertgroup-list")
response = client.get(url + "?status=0", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 1
+ assert len(response.data["results"]) == 1
assert response.data["results"][0]["pk"] == new_alert_group.public_primary_key
@@ -130,7 +130,7 @@ def test_status_ack(alert_group_internal_api_setup, make_user_auth_headers):
url = reverse("api-internal:alertgroup-list")
response = client.get(url + "?status=1", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 1
+ assert len(response.data["results"]) == 1
assert response.data["results"][0]["pk"] == ack_alert_group.public_primary_key
@@ -143,7 +143,7 @@ def test_status_resolved(alert_group_internal_api_setup, make_user_auth_headers)
url = reverse("api-internal:alertgroup-list")
response = client.get(url + "?status=2", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 1
+ assert len(response.data["results"]) == 1
assert response.data["results"][0]["pk"] == resolved_alert_group.public_primary_key
@@ -156,7 +156,7 @@ def test_status_silenced(alert_group_internal_api_setup, make_user_auth_headers)
url = reverse("api-internal:alertgroup-list")
response = client.get(url + "?status=3", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 1
+ assert len(response.data["results"]) == 1
assert response.data["results"][0]["pk"] == silenced_alert_group.public_primary_key
@@ -171,7 +171,7 @@ def test_all_statuses(alert_group_internal_api_setup, make_user_auth_headers):
url + "?status=0&status=1&&status=2&status=3", format="json", **make_user_auth_headers(user, token)
)
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 4
+ assert len(response.data["results"]) == 4
@pytest.mark.django_db
@@ -213,7 +213,7 @@ def test_get_filter_resolved_by(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
- assert first_response.data["count"] == 1
+ assert len(first_response.data["results"]) == 1
second_response = client.get(
url + f"?resolved_by={second_user.public_primary_key}",
@@ -221,7 +221,7 @@ def test_get_filter_resolved_by(
**make_user_auth_headers(first_user, token),
)
assert second_response.status_code == status.HTTP_200_OK
- assert second_response.data["count"] == 0
+ assert len(second_response.data["results"]) == 0
@pytest.mark.django_db
@@ -269,7 +269,7 @@ def test_get_filter_resolved_by_multiple_values(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
- assert first_response.data["count"] == 2
+ assert len(first_response.data["results"]) == 2
@pytest.mark.django_db
@@ -309,7 +309,7 @@ def test_get_filter_acknowledged_by(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
- assert first_response.data["count"] == 1
+ assert len(first_response.data["results"]) == 1
second_response = client.get(
url + f"?acknowledged_by={second_user.public_primary_key}",
@@ -317,7 +317,7 @@ def test_get_filter_acknowledged_by(
**make_user_auth_headers(first_user, token),
)
assert second_response.status_code == status.HTTP_200_OK
- assert second_response.data["count"] == 0
+ assert len(second_response.data["results"]) == 0
@pytest.mark.django_db
@@ -363,7 +363,7 @@ def test_get_filter_acknowledged_by_multiple_values(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
- assert first_response.data["count"] == 2
+ assert len(first_response.data["results"]) == 2
@pytest.mark.django_db
@@ -402,7 +402,7 @@ def test_get_filter_silenced_by(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
- assert first_response.data["count"] == 1
+ assert len(first_response.data["results"]) == 1
second_response = client.get(
url + f"?silenced_by={second_user.public_primary_key}",
@@ -410,7 +410,7 @@ def test_get_filter_silenced_by(
**make_user_auth_headers(first_user, token),
)
assert second_response.status_code == status.HTTP_200_OK
- assert second_response.data["count"] == 0
+ assert len(second_response.data["results"]) == 0
@pytest.mark.django_db
@@ -455,7 +455,7 @@ def test_get_filter_silenced_by_multiple_values(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
- assert first_response.data["count"] == 2
+ assert len(first_response.data["results"]) == 2
@pytest.mark.django_db
@@ -494,7 +494,7 @@ def test_get_filter_invitees_are(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
- assert first_response.data["count"] == 1
+ assert len(first_response.data["results"]) == 1
second_response = client.get(
url + f"?invitees_are={second_user.public_primary_key}",
@@ -502,7 +502,7 @@ def test_get_filter_invitees_are(
**make_user_auth_headers(first_user, token),
)
assert second_response.status_code == status.HTTP_200_OK
- assert second_response.data["count"] == 0
+ assert len(second_response.data["results"]) == 0
@pytest.mark.django_db
@@ -548,7 +548,7 @@ def test_get_filter_invitees_are_multiple_values(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
- assert first_response.data["count"] == 2
+ assert len(first_response.data["results"]) == 2
@pytest.mark.django_db
@@ -593,7 +593,7 @@ def test_get_filter_invitees_are_ag_with_multiple_logs(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
- assert first_response.data["count"] == 1
+ assert len(first_response.data["results"]) == 1
@pytest.mark.django_db
@@ -611,11 +611,11 @@ def test_get_filter_with_resolution_note(
# there are no alert groups with resolution_notes
response = client.get(url + "?with_resolution_note=true", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 0
+ assert len(response.data["results"]) == 0
response = client.get(url + "?with_resolution_note=false", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 4
+ assert len(response.data["results"]) == 4
# add resolution_notes to two of four alert groups
make_resolution_note(res_alert_group)
@@ -623,11 +623,11 @@ def test_get_filter_with_resolution_note(
response = client.get(url + "?with_resolution_note=true", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 2
+ assert len(response.data["results"]) == 2
response = client.get(url + "?with_resolution_note=false", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 2
+ assert len(response.data["results"]) == 2
@pytest.mark.django_db
@@ -653,7 +653,7 @@ def test_get_filter_with_resolution_note_after_delete_resolution_note(
response = client.get(url + "?with_resolution_note=true", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
- assert response.data["count"] == 1
+ assert len(response.data["results"]) == 1
@pytest.mark.django_db
diff --git a/engine/apps/api/tests/test_schedules.py b/engine/apps/api/tests/test_schedules.py
index 7e49a987..9560f2c6 100644
--- a/engine/apps/api/tests/test_schedules.py
+++ b/engine/apps/api/tests/test_schedules.py
@@ -432,6 +432,7 @@ def test_events_calendar(
"calendar_type": OnCallSchedule.PRIMARY,
"is_empty": False,
"is_gap": False,
+ "is_override": False,
"shift": {
"pk": on_call_shift.public_primary_key,
},
@@ -497,6 +498,7 @@ def test_filter_events_calendar(
"calendar_type": OnCallSchedule.PRIMARY,
"is_empty": False,
"is_gap": False,
+ "is_override": False,
"shift": {
"pk": on_call_shift.public_primary_key,
},
@@ -512,6 +514,7 @@ def test_filter_events_calendar(
"calendar_type": OnCallSchedule.PRIMARY,
"is_empty": False,
"is_gap": False,
+ "is_override": False,
"shift": {
"pk": on_call_shift.public_primary_key,
},
@@ -594,6 +597,7 @@ def test_filter_events_range_calendar(
"calendar_type": OnCallSchedule.PRIMARY,
"is_empty": False,
"is_gap": False,
+ "is_override": False,
"shift": {
"pk": on_call_shift.public_primary_key,
},
@@ -675,6 +679,7 @@ def test_filter_events_overrides(
"calendar_type": OnCallSchedule.OVERRIDES,
"is_empty": False,
"is_gap": False,
+ "is_override": True,
"shift": {
"pk": override.public_primary_key,
},
@@ -737,7 +742,7 @@ def test_filter_events_final_schedule(
# override: 22-23 / E
override_data = {
"start": start_date + timezone.timedelta(hours=22),
- "rotation_start": start_date,
+ "rotation_start": start_date + timezone.timedelta(hours=22),
"duration": timezone.timedelta(hours=1),
"schedule": schedule,
}
@@ -772,6 +777,7 @@ def test_filter_events_final_schedule(
"calendar_type": 1 if is_override else None if is_gap else 0,
"end": start_date + timezone.timedelta(hours=start + duration),
"is_gap": is_gap,
+ "is_override": is_override,
"priority_level": priority,
"start": start_date + timezone.timedelta(hours=start, milliseconds=1 if start == 0 else 0),
"user": user,
@@ -783,6 +789,7 @@ def test_filter_events_final_schedule(
"calendar_type": e["calendar_type"],
"end": e["end"],
"is_gap": e["is_gap"],
+ "is_override": e["is_override"],
"priority_level": e["priority_level"],
"start": e["start"],
"user": e["users"][0]["display_name"] if e["users"] else None,
@@ -792,6 +799,75 @@ def test_filter_events_final_schedule(
assert returned_events == expected_events
+@pytest.mark.django_db
+def test_next_shifts_per_user(
+ make_organization_and_user_with_plugin_token,
+ make_user_for_organization,
+ make_user_auth_headers,
+ make_schedule,
+ make_on_call_shift,
+):
+ organization, user, token = make_organization_and_user_with_plugin_token()
+ client = APIClient()
+
+ schedule = make_schedule(
+ organization,
+ schedule_class=OnCallScheduleWeb,
+ name="test_web_schedule",
+ )
+
+ tomorrow = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) + timezone.timedelta(days=1)
+ user_a, user_b, user_c = (make_user_for_organization(organization, username=i) for i in "ABC")
+
+ shifts = (
+ # user, priority, start time (h), duration (hs)
+ (user_a, 1, 8, 2), # r1-1: 8-10 / A
+ (user_a, 1, 15, 2), # r1-2: 15-17 / A
+ (user_b, 2, 7, 5), # r2-1: 7-12 / B
+ (user_b, 2, 16, 2), # r2-2: 16-18 / B
+ (user_c, 2, 18, 2), # r2-3: 18-20 / C
+ )
+ for user, priority, start_h, duration in shifts:
+ data = {
+ "start": tomorrow + timezone.timedelta(hours=start_h),
+ "rotation_start": tomorrow,
+ "duration": timezone.timedelta(hours=duration),
+ "priority_level": priority,
+ "frequency": CustomOnCallShift.FREQUENCY_DAILY,
+ "schedule": schedule,
+ }
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_RECURRENT_EVENT, **data
+ )
+ on_call_shift.users.add(user)
+
+ # override: 17-18 / C
+ override_data = {
+ "start": tomorrow + timezone.timedelta(hours=17),
+ "rotation_start": tomorrow + timezone.timedelta(hours=17),
+ "duration": timezone.timedelta(hours=1),
+ "schedule": schedule,
+ }
+ override = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_OVERRIDE, **override_data
+ )
+ override.add_rolling_users([[user_c]])
+
+ # final schedule: 7-12: B, 15-16: A, 16-17: B, 17-18: C (override), 18-20: C
+
+ url = reverse("api-internal:schedule-next-shifts-per-user", kwargs={"pk": schedule.public_primary_key})
+ response = client.get(url, format="json", **make_user_auth_headers(user, token))
+ assert response.status_code == status.HTTP_200_OK
+
+ expected = {
+ user_a.public_primary_key: (tomorrow + timezone.timedelta(hours=15), tomorrow + timezone.timedelta(hours=16)),
+ user_b.public_primary_key: (tomorrow + timezone.timedelta(hours=7), tomorrow + timezone.timedelta(hours=12)),
+ user_c.public_primary_key: (tomorrow + timezone.timedelta(hours=17), tomorrow + timezone.timedelta(hours=18)),
+ }
+ returned_data = {u: (ev["start"], ev["end"]) for u, ev in response.data["users"].items()}
+ assert returned_data == expected
+
+
@pytest.mark.django_db
def test_filter_events_invalid_type(
make_organization_and_user_with_plugin_token,
diff --git a/engine/apps/api/views/alert_group.py b/engine/apps/api/views/alert_group.py
index 5ea7e93b..838a372e 100644
--- a/engine/apps/api/views/alert_group.py
+++ b/engine/apps/api/views/alert_group.py
@@ -1,10 +1,6 @@
-from datetime import datetime, timedelta
+from datetime import timedelta
-from django import forms
-from django.db import models
-from django.db.models import CharField, Q
-from django.db.models.constants import LOOKUP_SEP
-from django.db.models.functions import Cast
+from django.db.models import Count, Max, Q
from django.utils import timezone
from django_filters import rest_framework as filters
from django_filters.widgets import RangeWidget
@@ -15,16 +11,15 @@ from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from apps.alerts.constants import ActionSource
-from apps.alerts.models import AlertGroup, AlertReceiveChannel
-from apps.alerts.tasks import invalidate_web_cache_for_alert_group
+from apps.alerts.models import Alert, AlertGroup, AlertReceiveChannel
from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdminOrEditor
-from apps.api.serializers.alert_group import AlertGroupSerializer
+from apps.api.serializers.alert_group import AlertGroupListSerializer, AlertGroupSerializer
from apps.auth_token.auth import MobileAppAuthTokenAuthentication, PluginAuthentication
from apps.user_management.models import User
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.filters import DateRangeFilterMixin, ModelFieldFilterMixin
from common.api_helpers.mixins import PreviewTemplateMixin, PublicPrimaryKeyMixin
-from common.api_helpers.paginators import FiftyPageSizePaginator
+from common.api_helpers.paginators import TwentyFiveCursorPaginator
def get_integration_queryset(request):
@@ -148,34 +143,6 @@ class AlertGroupFilter(DateRangeFilterMixin, ModelFieldFilterMixin, filters.Filt
return queryset
-class CustomSearchFilter(SearchFilter):
- def must_call_distinct(self, queryset, search_fields):
- """
- Return True if 'distinct()' should be used to query the given lookups.
- """
- for search_field in search_fields:
- opts = queryset.model._meta
- if search_field[0] in self.lookup_prefixes:
- search_field = search_field[1:]
-
- # From https://github.com/encode/django-rest-framework/pull/6240/files#diff-01f357e474dd8fd702e4951b9227bffcR88
- # Annotated fields do not need to be distinct
- if isinstance(queryset, models.QuerySet) and search_field in queryset.query.annotations:
- continue
-
- parts = search_field.split(LOOKUP_SEP)
- for part in parts:
- field = opts.get_field(part)
- if hasattr(field, "get_path_info"):
- # This field is a relation, update opts to follow the relation
- path_info = field.get_path_info()
- opts = path_info[-1].to_opts
- if any(path.m2m for path in path_info):
- # This field is a m2m relation so we know we need to call distinct
- return True
- return False
-
-
class AlertGroupView(
PreviewTemplateMixin,
PublicPrimaryKeyMixin,
@@ -216,90 +183,90 @@ class AlertGroupView(
serializer_class = AlertGroupSerializer
- pagination_class = FiftyPageSizePaginator
+ pagination_class = TwentyFiveCursorPaginator
- filter_backends = [CustomSearchFilter, filters.DjangoFilterBackend]
- search_fields = ["cached_render_for_web_str"]
+ filter_backends = [SearchFilter, filters.DjangoFilterBackend]
+ # todo: add ability to search by templated title
+ search_fields = ["public_primary_key", "inside_organization_number"]
filterset_class = AlertGroupFilter
- def list(self, request, *args, **kwargs):
- """
- It's compute-heavy so we rely on cache here.
- Attention: Make sure to invalidate cache if you update the format!
- """
- queryset = self.filter_queryset(self.get_queryset(eager=False, readonly=True))
+ def get_serializer_class(self):
+ if self.action == "list":
+ return AlertGroupListSerializer
- page = self.paginate_queryset(queryset)
- skip_slow_rendering = request.query_params.get("skip_slow_rendering") == "true"
- data = []
+ return super().get_serializer_class()
- for alert_group in page:
- if alert_group.cached_render_for_web == {}:
- # We cannot give empty data to web. So caching synchronously here.
- if skip_slow_rendering:
- # We just return dummy data.
- # Cache is not launched because after skip_slow_rendering request should come usual one
- # which will start caching
- data.append({"pk": alert_group.pk, "short": True})
- else:
- # Synchronously cache and return. It could be slow.
- alert_group.cache_for_web(alert_group.channel.organization)
- data.append(alert_group.cached_render_for_web)
- else:
- data.append(alert_group.cached_render_for_web)
- if not skip_slow_rendering:
- # Cache is not launched because after skip_slow_rendering request should come usual one
- # which will start caching
- alert_group.schedule_cache_for_web()
+ def get_queryset(self):
+ # no select_related or prefetch_related is used at this point, it will be done on paginate_queryset.
+ queryset = AlertGroup.unarchived_objects.filter(
+ channel__organization=self.request.auth.organization, channel__team=self.request.user.current_team
+ ).only("id")
- return self.get_paginated_response(data)
-
- def get_queryset(self, eager=True, readonly=False, order=True):
- if readonly:
- queryset = AlertGroup.unarchived_objects.using_readonly_db
- else:
- queryset = AlertGroup.unarchived_objects
-
- queryset = queryset.filter(
- channel__organization=self.request.auth.organization,
- channel__team=self.request.user.current_team,
- )
-
- if order:
- queryset = queryset.order_by("-started_at")
-
- queryset = queryset.annotate(cached_render_for_web_str=Cast("cached_render_for_web", output_field=CharField()))
-
- if eager:
- queryset = self.serializer_class.setup_eager_loading(queryset)
return queryset
- def get_alert_groups_and_days_for_previous_same_period(self):
- prev_alert_groups = AlertGroup.unarchived_objects.none()
- delta_days = None
+ def paginate_queryset(self, queryset):
+ """
+ All SQL joins (select_related and prefetch_related) will be performed AFTER pagination, so it only joins tables
+ for 25 alert groups, not the whole table.
+ """
+ alert_groups = super().paginate_queryset(queryset)
+ alert_groups = self.enrich(alert_groups)
+ return alert_groups
- started_at = self.request.query_params.get("started_at", None)
- if started_at is not None:
- started_at_gte, started_at_lte = AlertGroupFilter.parse_custom_datetime_range(started_at)
- delta_days = None
- if started_at_lte is not None:
- started_at_lte = forms.DateTimeField().to_python(started_at_lte)
- else:
- started_at_lte = datetime.now()
+ def get_object(self):
+ obj = super().get_object()
+ obj = self.enrich([obj])[0]
+ return obj
- if started_at_gte is not None:
- started_at_gte = forms.DateTimeField().to_python(value=started_at_gte)
- delta = started_at_lte.replace(tzinfo=None) - started_at_gte.replace(tzinfo=None)
- prev_alert_groups = self.get_queryset().filter(
- started_at__range=[started_at_gte - delta, started_at_gte]
- )
- delta_days = delta.days
- return prev_alert_groups, delta_days
+ def enrich(self, alert_groups):
+ """
+ This method performs select_related and prefetch_related (using setup_eager_loading) as well as in-memory joins
+ to add additional info like alert_count and last_alert for every alert group efficiently.
+ We need the last_alert because it's used by AlertGroupWebRenderer.
+ """
+
+ # enrich alert groups with select_related and prefetch_related
+ alert_group_pks = [alert_group.pk for alert_group in alert_groups]
+ queryset = AlertGroup.all_objects.filter(pk__in=alert_group_pks).order_by("-pk")
+
+ # do not load cached_render_for_web as it's deprecated and can be very large
+ queryset = queryset.defer("cached_render_for_web")
+
+ queryset = self.get_serializer_class().setup_eager_loading(queryset)
+ alert_groups = list(queryset)
+
+ # get info on alerts count and last alert ID for every alert group
+ alerts_info = (
+ Alert.objects.values("group_id")
+ .filter(group_id__in=alert_group_pks)
+ .annotate(alerts_count=Count("group_id"), last_alert_id=Max("id"))
+ )
+ alerts_info_map = {info["group_id"]: info for info in alerts_info}
+
+ # fetch last alerts for every alert group
+ last_alert_ids = [info["last_alert_id"] for info in alerts_info_map.values()]
+ last_alerts = Alert.objects.filter(pk__in=last_alert_ids)
+ for alert in last_alerts:
+ # link group back to alert
+ alert.group = [alert_group for alert_group in alert_groups if alert_group.pk == alert.group_id][0]
+ alerts_info_map[alert.group_id].update({"last_alert": alert})
+
+ # add additional "alerts_count" and "last_alert" fields to every alert group
+ for alert_group in alert_groups:
+ try:
+ alert_group.last_alert = alerts_info_map[alert_group.pk]["last_alert"]
+ alert_group.alerts_count = alerts_info_map[alert_group.pk]["alerts_count"]
+ except KeyError:
+ # alert group has no alerts
+ alert_group.last_alert = None
+ alert_group.alerts_count = 0
+
+ return alert_groups
@action(detail=False)
def stats(self, *args, **kwargs):
- alert_groups = self.filter_queryset(self.get_queryset(eager=False))
+ alert_groups = self.filter_queryset(self.get_queryset())
# Only count field is used, other fields left just in case for the backward compatibility
return Response(
{
@@ -324,7 +291,6 @@ class AlertGroupView(
if alert_group.root_alert_group is not None:
raise BadRequest(detail="Can't acknowledge an attached alert group")
alert_group.acknowledge_by_user(self.request.user, action_source=ActionSource.WEB)
- invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@@ -344,7 +310,6 @@ class AlertGroupView(
raise BadRequest(detail="Can't unacknowledge a resolved alert group")
alert_group.un_acknowledge_by_user(self.request.user, action_source=ActionSource.WEB)
- invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@@ -365,7 +330,6 @@ class AlertGroupView(
status=status.HTTP_400_BAD_REQUEST,
)
alert_group.resolve_by_user(self.request.user, action_source=ActionSource.WEB)
- invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@action(methods=["post"], detail=True)
@@ -381,7 +345,6 @@ class AlertGroupView(
raise BadRequest(detail="The alert group is not resolved")
alert_group.un_resolve_by_user(self.request.user, action_source=ActionSource.WEB)
- invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@action(methods=["post"], detail=True)
@@ -404,8 +367,6 @@ class AlertGroupView(
return Response(status=status.HTTP_400_BAD_REQUEST)
alert_group.attach_by_user(self.request.user, root_alert_group, action_source=ActionSource.WEB)
- invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
- invalidate_web_cache_for_alert_group(alert_group_pk=root_alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@action(methods=["post"], detail=True)
@@ -415,10 +376,8 @@ class AlertGroupView(
raise BadRequest(detail="Can't unattach maintenance alert group")
if alert_group.is_root_alert_group:
raise BadRequest(detail="Can't unattach an alert group because it is not attached")
- root_alert_group_pk = alert_group.root_alert_group_id
+
alert_group.un_attach_by_user(self.request.user, action_source=ActionSource.WEB)
- invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
- invalidate_web_cache_for_alert_group(alert_group_pk=root_alert_group_pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@action(methods=["post"], detail=True)
@@ -433,7 +392,6 @@ class AlertGroupView(
raise BadRequest(detail="Can't silence an attached alert group")
alert_group.silence_by_user(request.user, silence_delay=delay, action_source=ActionSource.WEB)
- invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": request}).data)
@action(methods=["get"], detail=False)
@@ -548,9 +506,9 @@ class AlertGroupView(
raise BadRequest(detail="Please specify a delay for silence")
kwargs["silence_delay"] = delay
- alert_groups = self.get_queryset(eager=False).filter(public_primary_key__in=alert_group_public_pks)
- alert_group_pks = list(alert_groups.values_list("id", flat=True))
- invalidate_web_cache_for_alert_group(alert_group_pks=alert_group_pks)
+ alert_groups = AlertGroup.unarchived_objects.filter(
+ channel__organization=self.request.auth.organization, public_primary_key__in=alert_group_public_pks
+ )
kwargs["user"] = self.request.user
kwargs["alert_groups"] = alert_groups
diff --git a/engine/apps/api/views/live_setting.py b/engine/apps/api/views/live_setting.py
index 1718bd15..e74d1f7f 100644
--- a/engine/apps/api/views/live_setting.py
+++ b/engine/apps/api/views/live_setting.py
@@ -1,7 +1,6 @@
from contextlib import suppress
from django.conf import settings
-from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from rest_framework import status, viewsets
from rest_framework.permissions import IsAuthenticated
@@ -11,7 +10,7 @@ from apps.api.permissions import IsAdmin
from apps.api.serializers.live_setting import LiveSettingSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.base.models import LiveSetting
-from apps.base.utils import live_settings
+from apps.oss_installation.models import CloudConnector
from apps.oss_installation.tasks import sync_users_with_cloud
from apps.slack.tasks import unpopulate_slack_user_identities
from apps.telegram.client import TelegramClient
@@ -40,54 +39,50 @@ class LiveSettingViewSet(PublicPrimaryKeyMixin, viewsets.ModelViewSet):
return queryset
def perform_update(self, serializer):
+ old_value = serializer.instance.value
new_value = serializer.validated_data["value"]
- self._update_hook(new_value)
- instance = serializer.save()
- sync_users = self.request.query_params.get("sync_users", "true") == "true"
- if instance.name == "GRAFANA_CLOUD_ONCALL_TOKEN" and sync_users:
- sync_users_with_cloud.apply_async()
+
+ super().perform_update(serializer)
+
+ if new_value != old_value:
+ self._post_update_hook(old_value)
def perform_destroy(self, instance):
+ old_value = instance.value
new_value = instance.default_value
- self._update_hook(new_value)
super().perform_destroy(instance)
- def _update_hook(self, new_value):
+ if new_value != old_value:
+ self._post_update_hook(old_value)
+
+ def _post_update_hook(self, old_value):
instance = self.get_object()
if instance.name == "TELEGRAM_TOKEN":
- try:
- old_token = live_settings.TELEGRAM_TOKEN
- except ImproperlyConfigured:
- old_token = None
+ self._reset_telegram_integration(old_token=old_value)
+ register_telegram_webhook.delay()
- if old_token != new_value:
- self._reset_telegram_integration(new_token=new_value)
+ if instance.name == "TELEGRAM_WEBHOOK_HOST":
+ register_telegram_webhook.delay()
- for setting_name in ["SLACK_CLIENT_OAUTH_ID", "SLACK_CLIENT_OAUTH_SECRET"]:
- if instance.name == setting_name:
- if getattr(live_settings, setting_name) != new_value:
- organization = self.request.auth.organization
- sti = organization.slack_team_identity
- if sti is not None:
- unpopulate_slack_user_identities.apply_async((sti.pk, True), countdown=0)
+ if instance.name in ["SLACK_CLIENT_OAUTH_ID", "SLACK_CLIENT_OAUTH_SECRET"]:
+ organization = self.request.auth.organization
+ slack_team_identity = organization.slack_team_identity
+ if slack_team_identity is not None:
+ unpopulate_slack_user_identities.delay(organization_pk=organization.pk, force=True)
if instance.name == "GRAFANA_CLOUD_ONCALL_TOKEN":
- from apps.oss_installation.models import CloudConnector
+ CloudConnector.remove_sync()
- try:
- old_token = live_settings.GRAFANA_CLOUD_ONCALL_TOKEN
- except ImproperlyConfigured:
- old_token = None
+ sync_users = self.request.query_params.get("sync_users", "true") == "true"
+ if sync_users:
+ sync_users_with_cloud.apply_async()
- if old_token != new_value:
- CloudConnector.remove_sync()
-
- def _reset_telegram_integration(self, new_token):
+ def _reset_telegram_integration(self, old_token):
# tell Telegram to cancel sending events from old bot
- with suppress(ImproperlyConfigured, error.InvalidToken, error.Unauthorized):
- old_client = TelegramClient()
+ with suppress(error.InvalidToken, error.Unauthorized):
+ old_client = TelegramClient(token=old_token)
old_client.api_client.delete_webhook()
# delete telegram channels for current team
@@ -101,6 +96,3 @@ class LiveSettingViewSet(PublicPrimaryKeyMixin, viewsets.ModelViewSet):
for user in users_with_telegram_connector:
user.telegram_connection.delete()
-
- # tell Telegram to send updates to new bot
- register_telegram_webhook.delay(token=new_token)
diff --git a/engine/apps/api/views/route_regex_debugger.py b/engine/apps/api/views/route_regex_debugger.py
index 527684ac..ffa9cc71 100644
--- a/engine/apps/api/views/route_regex_debugger.py
+++ b/engine/apps/api/views/route_regex_debugger.py
@@ -43,10 +43,7 @@ class RouteRegexDebuggerView(APIView):
if len(incidents_matching_regex) < MAX_INCIDENTS_TO_SHOW:
first_alert = ag.alerts.all()[0]
if re.search(regex, json.dumps(first_alert.raw_request_data)):
- if ag.cached_render_for_web:
- title = ag.cached_render_for_web["render_for_web"]["title"]
- else:
- title = AlertWebRenderer(first_alert).render()["title"]
+ title = AlertWebRenderer(first_alert).render()["title"]
incidents_matching_regex.append(
{
"title": title,
diff --git a/engine/apps/api/views/schedule.py b/engine/apps/api/views/schedule.py
index f7d25ab5..25c35f10 100644
--- a/engine/apps/api/views/schedule.py
+++ b/engine/apps/api/views/schedule.py
@@ -58,6 +58,7 @@ class ScheduleView(
*READ_ACTIONS,
"events",
"filter_events",
+ "next_shifts_per_user",
"notify_empty_oncall_options",
"notify_oncall_shift_freq_options",
"mention_options",
@@ -222,6 +223,7 @@ class ScheduleView(
"calendar_type": shift["calendar_type"],
"is_empty": len(shift["users"]) == 0 and not is_gap,
"is_gap": is_gap,
+ "is_override": shift["calendar_type"] == OnCallSchedule.TYPE_ICAL_OVERRIDES,
"shift": {
"pk": shift["shift_pk"],
},
@@ -395,6 +397,25 @@ class ScheduleView(
resolved.sort(key=lambda e: e["start"])
return resolved
+ @action(detail=True, methods=["get"])
+ def next_shifts_per_user(self, request, pk):
+ """Return next shift for users in schedule."""
+ user_tz, _ = self.get_request_timezone()
+ now = timezone.now()
+ starting_date = now.date()
+ schedule = self.original_get_object()
+ shift_events = self._filter_events(schedule, user_tz, starting_date, days=30, with_empty=False, with_gap=False)
+ events = self._resolve_schedule(shift_events)
+
+ users = {}
+ for e in events:
+ user = e["users"][0]["pk"] if e["users"] else None
+ if user is not None and user not in users and e["end"] > now:
+ users[user] = e
+
+ result = {"users": users}
+ return Response(result, status=status.HTTP_200_OK)
+
@action(detail=False, methods=["get"])
def type_options(self, request):
# TODO: check if it needed
diff --git a/engine/apps/base/models/live_setting.py b/engine/apps/base/models/live_setting.py
index 28d9cd68..7652116d 100644
--- a/engine/apps/base/models/live_setting.py
+++ b/engine/apps/base/models/live_setting.py
@@ -47,6 +47,7 @@ class LiveSetting(models.Model):
"GRAFANA_CLOUD_ONCALL_TOKEN",
"GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED",
"GRAFANA_CLOUD_NOTIFICATIONS_ENABLED",
+ "DANGEROUS_WEBHOOKS_ENABLED",
)
DESCRIPTIONS = {
@@ -107,10 +108,10 @@ class LiveSetting(models.Model):
"SENDGRID_SECRET_KEY": "It is the secret key to secure receiving inbound emails.",
"SENDGRID_INBOUND_EMAIL_DOMAIN": "Domain to receive emails for inbound emails integration.",
"TELEGRAM_TOKEN": (
- "Secret token for Telegram bot, you can get one via " "BotFather."
+ "Secret token for Telegram bot, you can get one via BotFather."
),
"TELEGRAM_WEBHOOK_HOST": (
- "Externally available URL for Telegram to make requests. Please restart OnCall backend after after update."
+ "Externally available URL for Telegram to make requests. Must use https and ports 80, 88, 443, 8443."
),
"SEND_ANONYMOUS_USAGE_STATS": (
"Grafana OnCall will send anonymous, but uniquely-identifiable usage analytics to Grafana Labs."
@@ -120,6 +121,7 @@ class LiveSetting(models.Model):
"GRAFANA_CLOUD_ONCALL_TOKEN": "Secret token for Grafana Cloud OnCall instance.",
"GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED": "Enable heartbeat integration with Grafana Cloud OnCall.",
"GRAFANA_CLOUD_NOTIFICATIONS_ENABLED": "Enable SMS/call notifications via Grafana Cloud OnCall",
+ "DANGEROUS_WEBHOOKS_ENABLED": "Enable outgoing webhooks to private networks",
}
SECRET_SETTING_NAMES = (
diff --git a/engine/apps/base/models/user_notification_policy_log_record.py b/engine/apps/base/models/user_notification_policy_log_record.py
index d8afed2d..ed261b2b 100644
--- a/engine/apps/base/models/user_notification_policy_log_record.py
+++ b/engine/apps/base/models/user_notification_policy_log_record.py
@@ -315,7 +315,6 @@ class UserNotificationPolicyLogRecord(models.Model):
@receiver(post_save, sender=UserNotificationPolicyLogRecord)
def listen_for_usernotificationpolicylogrecord_model_save(sender, instance, created, *args, **kwargs):
- instance.alert_group.drop_cached_after_resolve_report_json()
alert_group_pk = instance.alert_group.pk
if instance.type != UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FINISHED:
logger.debug(
diff --git a/engine/apps/base/utils.py b/engine/apps/base/utils.py
index 8339e295..00153a87 100644
--- a/engine/apps/base/utils.py
+++ b/engine/apps/base/utils.py
@@ -1,5 +1,6 @@
import json
import re
+from urllib.parse import urlparse
from django.apps import apps
from python_http_client import UnauthorizedError
@@ -8,6 +9,8 @@ from telegram import Bot
from twilio.base.exceptions import TwilioException
from twilio.rest import Client
+from common.api_helpers.utils import create_engine_url
+
class LiveSettingProxy:
def __dir__(self):
@@ -86,6 +89,12 @@ class LiveSettingValidator:
if not cls._is_email_valid(sendgrid_from_email):
return "Please specify a valid email"
+ @classmethod
+ def _check_slack_install_return_redirect_host(cls, slack_install_return_redirect_host):
+ scheme = urlparse(slack_install_return_redirect_host).scheme
+ if scheme != "https":
+ return "Must use https"
+
@classmethod
def _check_telegram_token(cls, telegram_token):
try:
@@ -94,6 +103,15 @@ class LiveSettingValidator:
except Exception as e:
return f"Telegram error: {str(e)}"
+ @classmethod
+ def _check_telegram_webhook_host(cls, telegram_webhook_host):
+ try:
+ url = create_engine_url("/telegram/", override_base=telegram_webhook_host)
+ bot = Bot(token=live_settings.TELEGRAM_TOKEN)
+ bot.set_webhook(url)
+ except Exception as e:
+ return f"Telegram error: {str(e)}"
+
@classmethod
def _check_grafana_cloud_oncall_token(cls, grafana_oncall_token):
from apps.oss_installation.models import CloudConnector
diff --git a/engine/apps/public_api/tests/test_incidents.py b/engine/apps/public_api/tests/test_incidents.py
index d43a1fb8..ea1198a0 100644
--- a/engine/apps/public_api/tests/test_incidents.py
+++ b/engine/apps/public_api/tests/test_incidents.py
@@ -32,7 +32,7 @@ def construct_expected_response_from_incidents(incidents):
"id": incident.public_primary_key,
"integration_id": incident.channel.public_primary_key,
"route_id": incident.channel_filter.public_primary_key,
- "alerts_count": incident.alerts_count,
+ "alerts_count": incident.alerts.count(),
"state": incident.state,
"created_at": created_at,
"resolved_at": resolved_at,
diff --git a/engine/apps/schedules/models/custom_on_call_shift.py b/engine/apps/schedules/models/custom_on_call_shift.py
index 94782050..fe5aa46c 100644
--- a/engine/apps/schedules/models/custom_on_call_shift.py
+++ b/engine/apps/schedules/models/custom_on_call_shift.py
@@ -226,19 +226,6 @@ class CustomOnCallShift(models.Model):
for schedule in schedules_to_update:
self.start_drop_ical_and_check_schedule_tasks(schedule)
- @property
- def event_is_started(self):
- return bool(self.rotation_start <= timezone.now())
-
- @property
- def event_is_finished(self):
- if self.frequency is not None:
- is_finished = bool(self.until <= timezone.now()) if self.until else False
- else:
- is_finished = bool(self.start + self.duration <= timezone.now())
-
- return is_finished
-
@property
def repr_settings_for_client_side_logging(self) -> str:
"""
@@ -273,31 +260,70 @@ class CustomOnCallShift(models.Model):
)
return result
+ @property
+ def event_is_started(self):
+ return bool(self.rotation_start <= timezone.now())
+
+ @property
+ def event_is_finished(self):
+ if self.frequency is not None:
+ is_finished = bool(self.until <= timezone.now()) if self.until else False
+ else:
+ is_finished = bool(self.start + self.duration <= timezone.now())
+
+ return is_finished
+
def convert_to_ical(self, time_zone="UTC"):
result = ""
# use shift time_zone if it exists, otherwise use schedule or default time_zone
time_zone = self.time_zone if self.time_zone is not None else time_zone
# rolling_users shift converts to several ical events
if self.type in (CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, CustomOnCallShift.TYPE_OVERRIDE):
- event_ical = None
+            # generate an initial iCal event used to calculate the rotation start date
+ event_ical = self.generate_ical(self.start, user_counter=0)
+ rotations_created = 0
+ all_rotation_checked = False
+
users_queue = self.get_rolling_users()
- for counter, users in enumerate(users_queue, start=1):
- start = self.get_next_start_date(event_ical)
- if not start: # means that rotation ends before next event starts
- break
- for user_counter, user in enumerate(users, start=1):
- event_ical = self.generate_ical(user, start, user_counter, counter, time_zone)
- result += event_ical
+ if not users_queue:
+ return result
+ if self.frequency is None:
+ users_queue = users_queue[:1]
+
+ # Get the date of the current rotation
+ if self.start == self.rotation_start or self.frequency is None:
+ start = self.start
+ else:
+ start = self.get_rotation_date(event_ical)
+
+ while not all_rotation_checked:
+ for counter, users in enumerate(users_queue, start=1):
+ if not start: # means that rotation ends before next event starts
+ all_rotation_checked = True
+ break
+ elif start >= self.rotation_start: # event has already started, generate iCal for each user
+ for user_counter, user in enumerate(users, start=1):
+ event_ical = self.generate_ical(start, user_counter, user, counter, time_zone)
+ result += event_ical
+ rotations_created += 1
+ else: # generate default iCal to calculate the date for the next rotation
+ event_ical = self.generate_ical(start, user_counter=0)
+
+ if rotations_created == len(users_queue): # means that we generated iCal for every user group
+ all_rotation_checked = True
+ break
+ # Use the flag 'get_next_date' to get the date of the next rotation
+ start = self.get_rotation_date(event_ical, get_next_date=True)
else:
for user_counter, user in enumerate(self.users.all(), start=1):
- result += self.generate_ical(user, self.start, user_counter, time_zone=time_zone)
+ result += self.generate_ical(self.start, user_counter, user, time_zone=time_zone)
return result
- def generate_ical(self, user, start, user_counter, counter=1, time_zone="UTC"):
- # create event for each user in a list because we can't parse multiple users from ical summary
+ def generate_ical(self, start, user_counter, user=None, counter=1, time_zone="UTC"):
event = Event()
event["uid"] = f"oncall-{self.uuid}-PK{self.public_primary_key}-U{user_counter}-E{counter}-S{self.source}"
- event.add("summary", self.get_summary_with_user_for_ical(user))
+ if user:
+ event.add("summary", self.get_summary_with_user_for_ical(user))
event.add("dtstart", self.convert_dt_to_schedule_timezone(start, time_zone))
event.add("dtend", self.convert_dt_to_schedule_timezone(start + self.duration, time_zone))
event.add("dtstamp", timezone.now())
@@ -317,39 +343,61 @@ class CustomOnCallShift(models.Model):
summary += f"{user.username} "
return summary
- def get_next_start_date(self, event_ical):
+ def get_rotation_date(self, event_ical, get_next_date=False):
"""Get date of the next event (for rolling_users shifts)"""
- if event_ical is None:
- return self.start
+ ONE_DAY = 1
+ ONE_HOUR = 1
+
current_event = Event.from_ical(event_ical)
# take shift interval, not event interval. For rolling_users shift it is not the same.
- current_event["rrule"]["INTERVAL"] = self.interval or 1
+ interval = self.interval or 1
+ current_event["rrule"]["INTERVAL"] = interval
current_event_start = current_event["DTSTART"].dt
next_event_start = current_event_start
- ONE_DAY = 1
+ # Calculate the minimum start date for the next event based on rotation frequency. We don't need to do this
+ # for the first rotation, because in this case the min start date will be the same as the current event date.
+ if get_next_date:
+ if self.frequency == CustomOnCallShift.FREQUENCY_HOURLY:
+ next_event_start = current_event_start + timezone.timedelta(hours=ONE_HOUR)
+ elif self.frequency == CustomOnCallShift.FREQUENCY_DAILY:
+ next_event_start = current_event_start + timezone.timedelta(days=ONE_DAY)
+ elif self.frequency == CustomOnCallShift.FREQUENCY_WEEKLY:
+ DAYS_IN_A_WEEK = 7
+ # count days before the next week starts
+ days_for_next_event = DAYS_IN_A_WEEK - current_event_start.weekday() + self.week_start
+ if days_for_next_event > DAYS_IN_A_WEEK:
+ days_for_next_event = days_for_next_event % DAYS_IN_A_WEEK
+ # count next event start date with respect to event interval
+ next_event_start = current_event_start + timezone.timedelta(
+ days=days_for_next_event + DAYS_IN_A_WEEK * (interval - 1)
+ )
+ elif self.frequency == CustomOnCallShift.FREQUENCY_MONTHLY:
+ DAYS_IN_A_MONTH = monthrange(current_event_start.year, current_event_start.month)[1]
+ # count days before the next month starts
+ days_for_next_event = DAYS_IN_A_MONTH - current_event_start.day + ONE_DAY
+ # count next event start date with respect to event interval
+ for i in range(1, interval):
+ next_month_days = monthrange(current_event_start.year, current_event_start.month + i)[1]
+ days_for_next_event += next_month_days
+ next_event_start = current_event_start + timezone.timedelta(days=days_for_next_event)
- if self.frequency == CustomOnCallShift.FREQUENCY_HOURLY:
- next_event_start = current_event_start + timezone.timedelta(hours=1)
- elif self.frequency == CustomOnCallShift.FREQUENCY_DAILY:
- # test daily with byday
- next_event_start = current_event_start + timezone.timedelta(days=ONE_DAY)
- elif self.frequency == CustomOnCallShift.FREQUENCY_WEEKLY:
+ end_date = None
+        # get the period for calculating the current rotation end date for long events with weekly or monthly frequency
+ if self.frequency == CustomOnCallShift.FREQUENCY_WEEKLY:
DAYS_IN_A_WEEK = 7
- days_for_next_event = DAYS_IN_A_WEEK - current_event_start.weekday() + self.week_start
- if days_for_next_event > DAYS_IN_A_WEEK:
- days_for_next_event = days_for_next_event % DAYS_IN_A_WEEK
- next_event_start = current_event_start + timezone.timedelta(days=days_for_next_event)
+ days_diff = 0
+ # get the last day of the week with respect to the week_start
+ if next_event_start.weekday() != self.week_start:
+ days_diff = DAYS_IN_A_WEEK + next_event_start.weekday() - self.week_start
+ days_diff %= DAYS_IN_A_WEEK
+ end_date = next_event_start + timezone.timedelta(days=DAYS_IN_A_WEEK - days_diff - ONE_DAY)
elif self.frequency == CustomOnCallShift.FREQUENCY_MONTHLY:
- DAYS_IN_A_MONTH = monthrange(self.start.year, self.start.month)[1]
- # count days before the next month starts
- days_for_next_event = DAYS_IN_A_MONTH - current_event_start.day + ONE_DAY
- if days_for_next_event > DAYS_IN_A_MONTH:
- days_for_next_event = days_for_next_event % DAYS_IN_A_MONTH
- next_event_start = current_event_start + timezone.timedelta(days=days_for_next_event)
+ # get the last day of the month
+ current_day_number = next_event_start.day
+ number_of_days = monthrange(next_event_start.year, next_event_start.month)[1]
+ days_diff = number_of_days - current_day_number
+ end_date = next_event_start + timezone.timedelta(days=days_diff)
- # check if rotation ends before next event starts
- if self.until and next_event_start > self.until:
- return
next_event = None
# repetitions generate the next event shift according with the recurrence rules
repetitions = UnfoldableCalendar(current_event).RepeatedEvent(
@@ -357,10 +405,22 @@ class CustomOnCallShift(models.Model):
)
ical_iter = repetitions.__iter__()
for event in ical_iter:
- if event.start.date() >= next_event_start.date():
- next_event = event
- break
- next_event_dt = next_event.start if next_event is not None else None
+ if end_date: # end_date exists for long events with frequency weekly and monthly
+ if end_date >= event.start >= next_event_start:
+ if event.start >= self.rotation_start:
+ next_event = event
+ break
+ else:
+ break
+ else:
+ if event.start >= next_event_start:
+ next_event = event
+ break
+
+ next_event_dt = next_event.start if next_event is not None else next_event_start
+
+ if self.until and next_event_dt > self.until:
+ return
return next_event_dt
@cached_property
diff --git a/engine/apps/schedules/tests/test_custom_on_call_shift.py b/engine/apps/schedules/tests/test_custom_on_call_shift.py
index 70ec55b0..ab77dc1b 100644
--- a/engine/apps/schedules/tests/test_custom_on_call_shift.py
+++ b/engine/apps/schedules/tests/test_custom_on_call_shift.py
@@ -1,3 +1,5 @@
+from calendar import monthrange
+
import pytest
from django.utils import timezone
@@ -38,7 +40,7 @@ def test_get_on_call_users_from_web_schedule_override(make_organization_and_user
organization, user = make_organization_and_user()
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
- date = timezone.now().replace(tzinfo=None, microsecond=0)
+ date = timezone.now().replace(microsecond=0)
data = {
"start": date,
@@ -105,7 +107,7 @@ def test_get_on_call_users_from_web_schedule_recurrent_event(
organization, user = make_organization_and_user()
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
- date = timezone.now().replace(tzinfo=None, microsecond=0)
+ date = timezone.now().replace(microsecond=0)
data = {
"priority_level": 1,
@@ -148,7 +150,7 @@ def test_get_on_call_users_from_rolling_users_event(
user_2 = make_user_for_organization(organization)
schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
- now = timezone.now().replace(tzinfo=None, microsecond=0)
+ now = timezone.now().replace(microsecond=0)
data = {
"priority_level": 1,
@@ -190,6 +192,581 @@ def test_get_on_call_users_from_rolling_users_event(
assert len(users_on_call) == 0
+@pytest.mark.django_db
+def test_rolling_users_event_with_interval_hourly(
+ make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+ organization, user_1 = make_organization_and_user()
+ user_2 = make_user_for_organization(organization)
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
+ now = timezone.now().replace(microsecond=0)
+
+ data = {
+ "priority_level": 1,
+ "start": now,
+ "rotation_start": now + timezone.timedelta(hours=1),
+ "duration": timezone.timedelta(seconds=600),
+ "frequency": CustomOnCallShift.FREQUENCY_HOURLY,
+ "interval": 2,
+ "schedule": schedule,
+ }
+ rolling_users = [[user_1], [user_2]]
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+ )
+ on_call_shift.add_rolling_users(rolling_users)
+
+ date = now + timezone.timedelta(minutes=5)
+
+ user_1_on_call_dates = [date + timezone.timedelta(hours=4)]
+ user_2_on_call_dates = [date + timezone.timedelta(hours=2), date + timezone.timedelta(hours=6)]
+ nobody_on_call_dates = [
+ date,
+ date + timezone.timedelta(hours=1),
+ date + timezone.timedelta(hours=3),
+ date + timezone.timedelta(hours=5),
+ ]
+
+ for dt in user_1_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_1 in users_on_call
+
+ for dt in user_2_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_2 in users_on_call
+
+ for date in nobody_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, date)
+ assert len(users_on_call) == 0
+
+
+@pytest.mark.django_db
+def test_rolling_users_event_with_interval_daily(
+ make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+ organization, user_1 = make_organization_and_user()
+ user_2 = make_user_for_organization(organization)
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
+ now = timezone.now().replace(microsecond=0)
+
+ data = {
+ "priority_level": 1,
+ "start": now,
+ "rotation_start": now,
+ "duration": timezone.timedelta(seconds=10800),
+ "frequency": CustomOnCallShift.FREQUENCY_DAILY,
+ "interval": 2,
+ "schedule": schedule,
+ }
+ rolling_users = [[user_1], [user_2]]
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+ )
+ on_call_shift.add_rolling_users(rolling_users)
+
+ date = now + timezone.timedelta(minutes=5)
+
+ user_1_on_call_dates = [date, date + timezone.timedelta(days=4)]
+ user_2_on_call_dates = [date + timezone.timedelta(days=2), date + timezone.timedelta(days=6)]
+ nobody_on_call_dates = [
+ date + timezone.timedelta(days=1),
+ date + timezone.timedelta(days=3),
+ date + timezone.timedelta(days=5),
+ ]
+
+ for dt in user_1_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_1 in users_on_call
+
+ for dt in user_2_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_2 in users_on_call
+
+ for dt in nobody_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 0
+
+
+@pytest.mark.django_db
+def test_rolling_users_event_with_interval_weekly(
+ make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+ organization, user_1 = make_organization_and_user()
+ user_2 = make_user_for_organization(organization)
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ now = timezone.now().replace(microsecond=0)
+
+ data = {
+ "priority_level": 1,
+ "start": now,
+ "rotation_start": now + timezone.timedelta(hours=1),
+ "duration": timezone.timedelta(seconds=10800),
+ "frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
+ "interval": 2,
+ "week_start": now.weekday(),
+ "schedule": schedule,
+ }
+ rolling_users = [[user_1], [user_2]]
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+ )
+ on_call_shift.add_rolling_users(rolling_users)
+ schedule.custom_on_call_shifts.add(on_call_shift)
+
+ date = now + timezone.timedelta(minutes=5)
+
+ user_1_on_call_dates = [date + timezone.timedelta(days=28)]
+ user_2_on_call_dates = [date + timezone.timedelta(days=14), date + timezone.timedelta(days=42)]
+ nobody_on_call_dates = [
+ date,
+ date + timezone.timedelta(days=7),
+ date + timezone.timedelta(days=21),
+ date + timezone.timedelta(days=35),
+ ]
+
+ for dt in user_1_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_1 in users_on_call
+
+ for dt in user_2_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_2 in users_on_call
+
+ for dt in nobody_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 0
+
+
+@pytest.mark.django_db
+def test_rolling_users_event_with_interval_monthly(
+ make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+ organization, user_1 = make_organization_and_user()
+ user_2 = make_user_for_organization(organization)
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
+ start_date = timezone.now().replace(day=1, microsecond=0)
+ days_for_next_month_1 = monthrange(start_date.year, start_date.month)[1]
+ days_for_next_month_2 = monthrange(start_date.year, start_date.month + 1)[1] + days_for_next_month_1
+ days_for_next_month_3 = monthrange(start_date.year, start_date.month + 2)[1] + days_for_next_month_2
+ days_for_next_month_4 = monthrange(start_date.year, start_date.month + 3)[1] + days_for_next_month_3
+
+ data = {
+ "priority_level": 1,
+ "start": start_date,
+ "rotation_start": start_date + timezone.timedelta(hours=1),
+ "duration": timezone.timedelta(seconds=10800),
+ "frequency": CustomOnCallShift.FREQUENCY_MONTHLY,
+ "interval": 2,
+ "week_start": start_date.weekday(),
+ "schedule": schedule,
+ }
+ rolling_users = [[user_1], [user_2]]
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+ )
+ on_call_shift.add_rolling_users(rolling_users)
+ schedule.custom_on_call_shifts.add(on_call_shift)
+
+ date = start_date + timezone.timedelta(minutes=5)
+
+ user_1_on_call_dates = [date + timezone.timedelta(days=days_for_next_month_4)]
+ user_2_on_call_dates = [date + timezone.timedelta(days=days_for_next_month_2)]
+ nobody_on_call_dates = [
+ date,
+ date + timezone.timedelta(days=days_for_next_month_1),
+ date + timezone.timedelta(days=days_for_next_month_3),
+ ]
+
+ for dt in user_1_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_1 in users_on_call
+
+ for dt in user_2_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_2 in users_on_call
+
+ for dt in nobody_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 0
+
+
+@pytest.mark.django_db
+def test_rolling_users_with_diff_start_and_rotation_start_hourly(
+ make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+ organization, user_1 = make_organization_and_user()
+ user_2 = make_user_for_organization(organization)
+ user_3 = make_user_for_organization(organization)
+
+ schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
+ now = timezone.now().replace(microsecond=0)
+
+ data = {
+ "priority_level": 1,
+ "start": now,
+ "rotation_start": now + timezone.timedelta(hours=2),
+ "duration": timezone.timedelta(seconds=1800),
+ "frequency": CustomOnCallShift.FREQUENCY_HOURLY,
+ "schedule": schedule,
+ "until": now + timezone.timedelta(hours=6, minutes=59),
+ }
+ rolling_users = [[user_1], [user_2], [user_3]]
+ on_call_shift = make_on_call_shift(
+ organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+ )
+ on_call_shift.add_rolling_users(rolling_users)
+
+ date = now + timezone.timedelta(minutes=5)
+ # rotation starts from user_3, because user_1 and user_2 started earlier than rotation start date
+ user_1_on_call_dates = [date + timezone.timedelta(hours=3), date + timezone.timedelta(hours=6)]
+ user_2_on_call_dates = [date + timezone.timedelta(hours=4)]
+ user_3_on_call_dates = [date + timezone.timedelta(hours=2), date + timezone.timedelta(hours=5)]
+ nobody_on_call_dates = [
+ date, # less than rotation start
+ date + timezone.timedelta(hours=1), # less than rotation start
+        date + timezone.timedelta(hours=7),  # later than until
+ ]
+
+ for dt in user_1_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_1 in users_on_call
+
+ for dt in user_2_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_2 in users_on_call
+
+ for dt in user_3_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 1
+ assert user_3 in users_on_call
+
+ for dt in nobody_on_call_dates:
+ users_on_call = list_users_to_notify_from_ical(schedule, dt)
+ assert len(users_on_call) == 0
+
+
+@pytest.mark.django_db
+def test_rolling_users_with_diff_start_and_rotation_start_daily(
+    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+    organization, user_1 = make_organization_and_user()
+    user_2 = make_user_for_organization(organization)
+    user_3 = make_user_for_organization(organization)
+
+    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
+    now = timezone.now().replace(microsecond=0)
+
+    data = {
+        "priority_level": 1,
+        "start": now,
+        "rotation_start": now + timezone.timedelta(days=1, hours=1),
+        "duration": timezone.timedelta(seconds=1800),
+        "frequency": CustomOnCallShift.FREQUENCY_DAILY,
+        "schedule": schedule,
+        "until": now + timezone.timedelta(days=6, minutes=1),
+    }
+    rolling_users = [[user_1], [user_2], [user_3]]
+    on_call_shift = make_on_call_shift(
+        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+    )
+    on_call_shift.add_rolling_users(rolling_users)
+
+    date = now + timezone.timedelta(minutes=5)
+    # user_1 (day 0) and user_2 (day 1) occur before rotation_start (day 1 + 1h), so the first
+    # rendered occurrence is user_3's on day 2; the rotation then cycles 3 -> 1 -> 2 -> 3 -> 1
+    user_1_on_call_dates = [date + timezone.timedelta(days=3), date + timezone.timedelta(days=6)]
+    user_2_on_call_dates = [date + timezone.timedelta(days=4)]
+    user_3_on_call_dates = [date + timezone.timedelta(days=2), date + timezone.timedelta(days=5)]
+    nobody_on_call_dates = [
+        date,  # day 0: occurrence suppressed, before rotation_start
+        date + timezone.timedelta(days=1),  # day 1: still before rotation_start (day 1 + 1h)
+        date + timezone.timedelta(days=7),  # past the "until" boundary, no more occurrences
+    ]
+
+    for dt in user_1_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_1 in users_on_call
+
+    for dt in user_2_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_2 in users_on_call
+
+    for dt in user_3_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_3 in users_on_call
+
+    for dt in nobody_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 0
+
+
+@pytest.mark.django_db
+def test_rolling_users_with_diff_start_and_rotation_start_weekly(
+    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+    organization, user_1 = make_organization_and_user()
+    user_2 = make_user_for_organization(organization)
+    user_3 = make_user_for_organization(organization)
+
+    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
+    now = timezone.now().replace(microsecond=0)
+
+    data = {
+        "priority_level": 1,
+        "start": now,
+        "week_start": now.weekday(),
+        "rotation_start": now + timezone.timedelta(days=7, hours=1),
+        "duration": timezone.timedelta(seconds=1800),
+        "frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
+        "schedule": schedule,
+        "until": now + timezone.timedelta(days=42, minutes=1),
+    }
+    rolling_users = [[user_1], [user_2], [user_3]]
+    on_call_shift = make_on_call_shift(
+        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+    )
+    on_call_shift.add_rolling_users(rolling_users)
+
+    date = now + timezone.timedelta(minutes=5)
+    # user_1 (week 0) and user_2 (week 1) occur before rotation_start (day 7 + 1h), so the first
+    # rendered occurrence is user_3's in week 2; the rotation then cycles 3 -> 1 -> 2 -> 3 -> 1
+    user_1_on_call_dates = [date + timezone.timedelta(days=21), date + timezone.timedelta(days=42)]
+    user_2_on_call_dates = [date + timezone.timedelta(days=28)]
+    user_3_on_call_dates = [date + timezone.timedelta(days=14), date + timezone.timedelta(days=35)]
+    nobody_on_call_dates = [
+        date,  # week 0: occurrence suppressed, before rotation_start
+        date + timezone.timedelta(days=7),  # week 1: still before rotation_start (day 7 + 1h)
+        date + timezone.timedelta(days=43),  # past the "until" boundary, no more occurrences
+    ]
+
+    for dt in user_1_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_1 in users_on_call
+
+    for dt in user_2_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_2 in users_on_call
+
+    for dt in user_3_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_3 in users_on_call
+
+    for dt in nobody_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 0
+
+
+@pytest.mark.django_db
+def test_rolling_users_with_diff_start_and_rotation_start_weekly_by_day(
+    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+    organization, user_1 = make_organization_and_user()
+    user_2 = make_user_for_organization(organization)
+    user_3 = make_user_for_organization(organization)
+
+    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
+    now = timezone.now().replace(microsecond=0)
+    today_weekday = now.weekday()
+    # events recur only on two weekdays: tomorrow's weekday and the one three days out
+    weekdays = [(today_weekday + 1) % 7, (today_weekday + 3) % 7]
+    by_day = [CustomOnCallShift.ICAL_WEEKDAY_MAP[day] for day in weekdays]
+
+    data = {
+        "priority_level": 1,
+        "start": now,
+        "week_start": today_weekday,
+        "rotation_start": now + timezone.timedelta(days=8, hours=1),
+        "duration": timezone.timedelta(seconds=1800),
+        "frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
+        "schedule": schedule,
+        "until": now + timezone.timedelta(days=23, minutes=1),
+        "by_day": by_day,
+    }
+    rolling_users = [[user_1], [user_2], [user_3]]
+    on_call_shift = make_on_call_shift(
+        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+    )
+    on_call_shift.add_rolling_users(rolling_users)
+
+    date = now + timezone.timedelta(minutes=5)
+
+    # week 1: weekdays[0] - no (+1 day from start, before rotation_start); weekdays[1] - no (+3 days) -- user_1 skipped
+    # week 2: weekdays[0] - no (+8 days, still before rotation_start day 8 + 1h); weekdays[1] - yes (+10 days) -- user_2
+    # week 3: weekdays[0] - yes (+15 days); weekdays[1] - yes (+17 days) -- user_3
+    # week 4: weekdays[0] - yes (+22 days); weekdays[1] - no (+24 days, past "until") -- user_1
+    user_1_on_call_dates = [date + timezone.timedelta(days=22)]
+    user_2_on_call_dates = [date + timezone.timedelta(days=10)]
+    user_3_on_call_dates = [date + timezone.timedelta(days=15), date + timezone.timedelta(days=17)]
+    nobody_on_call_dates = [
+        date,  # start day itself: not in by_day and before rotation_start
+        date + timezone.timedelta(days=1),  # before rotation_start
+        date + timezone.timedelta(days=3),  # before rotation_start
+        date + timezone.timedelta(days=8),  # before rotation_start (day 8 + 1h)
+        date + timezone.timedelta(days=9),  # weekday not listed in by_day
+        date + timezone.timedelta(days=24),  # past the "until" boundary
+    ]
+
+    for dt in user_1_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_1 in users_on_call
+
+    for dt in user_2_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_2 in users_on_call
+
+    for dt in user_3_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_3 in users_on_call
+
+    for dt in nobody_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 0
+
+
+@pytest.mark.django_db
+def test_rolling_users_with_diff_start_and_rotation_start_monthly(
+    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+    organization, user_1 = make_organization_and_user()
+    user_2 = make_user_for_organization(organization)
+    user_3 = make_user_for_organization(organization)
+
+    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
+    now = timezone.now().replace(day=1, microsecond=0)
+    days_in_curr_month = monthrange(now.year, now.month)[1]
+    days_in_next_month = monthrange(now.year + now.month // 12, now.month % 12 + 1)[1]  # roll the year over: month + 1 would crash in December
+
+    data = {
+        "priority_level": 1,
+        "start": now,
+        "week_start": now.weekday(),
+        "rotation_start": now + timezone.timedelta(days=days_in_curr_month - 1, hours=1),
+        "duration": timezone.timedelta(seconds=1800),
+        "frequency": CustomOnCallShift.FREQUENCY_MONTHLY,
+        "schedule": schedule,
+        "until": now + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 10, minutes=1),
+    }
+    rolling_users = [[user_1], [user_2], [user_3]]
+    on_call_shift = make_on_call_shift(
+        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+    )
+    on_call_shift.add_rolling_users(rolling_users)
+
+    date = now + timezone.timedelta(minutes=5)
+    # user_1's first-of-month occurrence predates rotation_start (last day of month + 1h), so the rotation begins with user_2
+    user_2_on_call_dates = [date + timezone.timedelta(days=days_in_curr_month)]
+    user_3_on_call_dates = [date + timezone.timedelta(days=days_in_curr_month + days_in_next_month)]
+    nobody_on_call_dates = [
+        date,  # less than rotation start
+        date + timezone.timedelta(days=days_in_curr_month - 1),  # less than rotation start
+        date + timezone.timedelta(days=days_in_curr_month + 1),  # higher than event end
+        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 2),  # higher than event end
+        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 11),  # higher than until
+    ]
+
+    for dt in user_2_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_2 in users_on_call
+
+    for dt in user_3_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_3 in users_on_call
+
+    for dt in nobody_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 0
+
+
+@pytest.mark.django_db
+def test_rolling_users_with_diff_start_and_rotation_start_monthly_by_monthday(
+    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
+):
+    organization, user_1 = make_organization_and_user()
+    user_2 = make_user_for_organization(organization)
+    user_3 = make_user_for_organization(organization)
+
+    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
+    start_date = timezone.now().replace(day=1, microsecond=0)
+    days_in_curr_month = monthrange(start_date.year, start_date.month)[1]
+    days_in_next_month = monthrange(start_date.year + start_date.month // 12, start_date.month % 12 + 1)[1]  # roll the year over: month + 1 would crash in December
+
+    data = {
+        "priority_level": 1,
+        "start": start_date,
+        "week_start": start_date.weekday(),
+        "rotation_start": start_date + timezone.timedelta(days=days_in_curr_month - 1, hours=1),
+        "duration": timezone.timedelta(seconds=1800),
+        "frequency": CustomOnCallShift.FREQUENCY_MONTHLY,
+        "schedule": schedule,
+        "until": start_date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 10, minutes=1),
+        "by_monthday": list(range(1, 5)),  # events occur only on the 1st-4th of each month
+    }
+    rolling_users = [[user_1], [user_2], [user_3]]
+    on_call_shift = make_on_call_shift(
+        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+    )
+    on_call_shift.add_rolling_users(rolling_users)
+
+    date = start_date + timezone.timedelta(minutes=5)
+    # user_1's occurrences (1st-4th of the current month) predate rotation_start, so the rotation begins with user_2
+    user_2_on_call_dates = [
+        date + timezone.timedelta(days=days_in_curr_month),
+        date + timezone.timedelta(days=days_in_curr_month + 1),
+        date + timezone.timedelta(days=days_in_curr_month + 2),
+        date + timezone.timedelta(days=days_in_curr_month + 3),
+    ]
+    user_3_on_call_dates = [
+        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month),
+        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 1),
+        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 2),
+        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 3),
+    ]
+    nobody_on_call_dates = [
+        date,  # less than rotation start
+        date + timezone.timedelta(days=3),  # less than rotation start
+        date + timezone.timedelta(days=days_in_curr_month + 4),  # out of by_monthday range
+        date + timezone.timedelta(days=days_in_curr_month + 6),  # out of by_monthday range
+        date + timezone.timedelta(days=days_in_curr_month + 10),  # out of by_monthday range
+        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 11),  # higher than until
+    ]
+
+    for dt in user_2_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_2 in users_on_call
+
+    for dt in user_3_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 1
+        assert user_3 in users_on_call
+
+    for dt in nobody_on_call_dates:
+        users_on_call = list_users_to_notify_from_ical(schedule, dt)
+        assert len(users_on_call) == 0
+
+
@pytest.mark.django_db
def test_get_oncall_users_for_empty_schedule(
make_organization,
@@ -299,3 +876,39 @@ def test_shift_convert_to_ical(make_organization_and_user, make_on_call_shift):
ical_rrule_until = on_call_shift.until.strftime("%Y%m%dT%H%M%S")
expected_rrule = f"RRULE:FREQ=HOURLY;UNTIL={ical_rrule_until}Z;INTERVAL=1;WKST=SU"
assert expected_rrule in ical_data
+
+
+@pytest.mark.django_db
+def test_rolling_users_shift_convert_to_ical(
+    make_organization_and_user,
+    make_user_for_organization,
+    make_on_call_shift,
+):
+    # Two single-user groups rotating on an every-2-iterations hourly shift:
+    # the rendered RRULE interval must be group count * interval (2 * 2 = 4).
+    organization, user_1 = make_organization_and_user()
+    user_2 = make_user_for_organization(organization)
+    rolling_users = [[user_1], [user_2]]
+
+    start = timezone.now().replace(microsecond=0)
+
+    data = {
+        "priority_level": 1,
+        "start": start,
+        "rotation_start": start,
+        "duration": timezone.timedelta(seconds=10800),
+        "frequency": CustomOnCallShift.FREQUENCY_HOURLY,
+        "interval": 2,
+        "until": start + timezone.timedelta(days=30),
+    }
+    on_call_shift = make_on_call_shift(
+        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
+    )
+    on_call_shift.add_rolling_users(rolling_users)
+
+    rendered_ical = on_call_shift.convert_to_ical()
+    until_str = on_call_shift.until.strftime("%Y%m%dT%H%M%S")
+    expected_rrule = f"RRULE:FREQ=HOURLY;UNTIL={until_str}Z;INTERVAL=4;WKST=SU"
+
+    assert on_call_shift.event_interval == len(rolling_users) * data["interval"]
+    assert expected_rrule in rendered_ical
diff --git a/engine/apps/slack/scenarios/alertgroup_appearance.py b/engine/apps/slack/scenarios/alertgroup_appearance.py
index 1ccba05f..588b70d0 100644
--- a/engine/apps/slack/scenarios/alertgroup_appearance.py
+++ b/engine/apps/slack/scenarios/alertgroup_appearance.py
@@ -247,10 +247,6 @@ class UpdateAppearanceStep(scenario_step.ScenarioStep):
if new_value is None and old_value is not None:
setattr(alert_receive_channel, attr_name, None)
alert_receive_channel.save()
- # Drop caches for current alert group
- if notification_channel == "web":
- setattr(alert_group, f"cached_render_for_web_{templatizable_attr}", None)
- alert_group.save()
elif new_value is not None:
default_values = getattr(
AlertReceiveChannel,
@@ -265,18 +261,10 @@ class UpdateAppearanceStep(scenario_step.ScenarioStep):
jinja_template_env.from_string(new_value)
setattr(alert_receive_channel, attr_name, new_value)
alert_receive_channel.save()
- # Drop caches for current alert group
- if notification_channel == "web":
- setattr(alert_group, f"cached_render_for_web_{templatizable_attr}", None)
- alert_group.save()
elif default_value is not None and new_value.strip() == default_value.strip():
new_value = None
setattr(alert_receive_channel, attr_name, new_value)
alert_receive_channel.save()
- # Drop caches for current alert group
- if notification_channel == "web":
- setattr(alert_group, f"cached_render_for_web_{templatizable_attr}", None)
- alert_group.save()
except TemplateSyntaxError:
return Response(
{"response_action": "errors", "errors": {attr_name: "Template has incorrect format"}},
diff --git a/engine/apps/slack/scenarios/resolution_note.py b/engine/apps/slack/scenarios/resolution_note.py
index 364704b7..f6c78305 100644
--- a/engine/apps/slack/scenarios/resolution_note.py
+++ b/engine/apps/slack/scenarios/resolution_note.py
@@ -674,7 +674,6 @@ class AddRemoveThreadMessageStep(UpdateResolutionNoteStep, scenario_step.Scenari
add_to_resolution_note = True if value["msg_value"].startswith("add") else False
slack_thread_message = None
resolution_note = None
- drop_ag_cache = False
alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
@@ -695,7 +694,6 @@ class AddRemoveThreadMessageStep(UpdateResolutionNoteStep, scenario_step.Scenari
else:
resolution_note.recreate()
self.add_resolution_note_reaction(slack_thread_message)
- drop_ag_cache = True
elif not add_to_resolution_note:
# Check if resolution_note can be removed
if (
@@ -720,13 +718,9 @@ class AddRemoveThreadMessageStep(UpdateResolutionNoteStep, scenario_step.Scenari
slack_thread_message.added_to_resolution_note = False
slack_thread_message.save(update_fields=["added_to_resolution_note"])
self.remove_resolution_note_reaction(slack_thread_message)
- drop_ag_cache = True
self.update_alert_group_resolution_note_button(
alert_group,
)
- if drop_ag_cache:
- alert_group.drop_cached_after_resolve_report_json()
- alert_group.schedule_cache_for_web()
resolution_note_data = json.loads(payload["actions"][0]["value"])
resolution_note_data["resolution_note_window_action"] = "edit_update"
ResolutionNoteModalStep(slack_team_identity, self.organization, self.user).process_scenario(
diff --git a/engine/apps/social_auth/live_setting_django_strategy.py b/engine/apps/social_auth/live_setting_django_strategy.py
index a8deb5d8..62f14d6e 100644
--- a/engine/apps/social_auth/live_setting_django_strategy.py
+++ b/engine/apps/social_auth/live_setting_django_strategy.py
@@ -39,8 +39,6 @@ class LiveSettingDjangoStrategy(DjangoStrategy):
"""
if live_settings.SLACK_INSTALL_RETURN_REDIRECT_HOST is not None and path is not None:
return create_engine_url(path, override_base=live_settings.SLACK_INSTALL_RETURN_REDIRECT_HOST)
- if settings.SLACK_INSTALL_RETURN_REDIRECT_HOST is not None and path is not None:
- return create_engine_url(path, override_base=settings.SLACK_INSTALL_RETURN_REDIRECT_HOST)
if self.request:
return self.request.build_absolute_uri(path)
else:
diff --git a/engine/apps/telegram/client.py b/engine/apps/telegram/client.py
index 3856d371..f6983d22 100644
--- a/engine/apps/telegram/client.py
+++ b/engine/apps/telegram/client.py
@@ -36,9 +36,6 @@ class TelegramClient:
def register_webhook(self, webhook_url: Optional[str] = None) -> None:
webhook_url = webhook_url or create_engine_url("/telegram/", override_base=live_settings.TELEGRAM_WEBHOOK_HOST)
- if webhook_url is None:
- webhook_url = live_settings.TELEGRAM_WEBHOOK_URL
-
webhook_info = self.api_client.get_webhook_info()
if webhook_info.url == webhook_url:
return
diff --git a/engine/apps/telegram/decorators.py b/engine/apps/telegram/decorators.py
index a5560e4b..bfc29a79 100644
--- a/engine/apps/telegram/decorators.py
+++ b/engine/apps/telegram/decorators.py
@@ -1,7 +1,6 @@
import logging
from functools import wraps
-from django.core.exceptions import ImproperlyConfigured
from telegram import error
from apps.telegram.client import TelegramClient
@@ -14,7 +13,7 @@ def handle_missing_token(f):
def decorated(*args, **kwargs):
try:
TelegramClient()
- except (ImproperlyConfigured, error.InvalidToken) as e:
+ except error.InvalidToken as e:
logger.warning(
"Tried to initialize a Telegram client, but TELEGRAM_TOKEN live setting is invalid or missing. "
f"Exception: {e}"
diff --git a/engine/apps/telegram/models/connectors/personal.py b/engine/apps/telegram/models/connectors/personal.py
index 4b0533d8..895c6a62 100644
--- a/engine/apps/telegram/models/connectors/personal.py
+++ b/engine/apps/telegram/models/connectors/personal.py
@@ -1,4 +1,3 @@
-from django.core.exceptions import ImproperlyConfigured
from django.db import models
from telegram import error
@@ -67,7 +66,7 @@ class TelegramToUserConnector(models.Model):
def send_full_incident(self, alert_group: AlertGroup, notification_policy: UserNotificationPolicy) -> None:
try:
telegram_client = TelegramClient()
- except (ImproperlyConfigured, error.InvalidToken):
+ except error.InvalidToken:
TelegramToUserConnector.create_telegram_notification_error(
alert_group,
self.user,
@@ -125,7 +124,7 @@ class TelegramToUserConnector(models.Model):
def send_link_to_channel_message(self, alert_group: AlertGroup, notification_policy: UserNotificationPolicy):
try:
telegram_client = TelegramClient()
- except (ImproperlyConfigured, error.InvalidToken):
+ except error.InvalidToken:
TelegramToUserConnector.create_telegram_notification_error(
alert_group,
self.user,
diff --git a/engine/apps/telegram/tasks.py b/engine/apps/telegram/tasks.py
index fb1b2f96..3da5d18a 100644
--- a/engine/apps/telegram/tasks.py
+++ b/engine/apps/telegram/tasks.py
@@ -33,7 +33,7 @@ def register_telegram_webhook(token=None):
try:
telegram_client.register_webhook()
- except (error.InvalidToken, error.Unauthorized) as e:
+ except (error.InvalidToken, error.Unauthorized, error.BadRequest) as e:
logger.warning(f"Tried to register Telegram webhook using token: {telegram_client.token}, got error: {e}")
diff --git a/engine/apps/twilioapp/models/phone_call.py b/engine/apps/twilioapp/models/phone_call.py
index 69893b8a..b0db9f91 100644
--- a/engine/apps/twilioapp/models/phone_call.py
+++ b/engine/apps/twilioapp/models/phone_call.py
@@ -251,7 +251,7 @@ class PhoneCall(models.Model):
if phone_calls_left < 3:
message_body += " {} phone calls left. Contact your admin.".format(phone_calls_left)
- twilio_call = twilio_client.make_call(message_body, user.verified_phone_number)
+ twilio_call = twilio_client.make_call(message_body, user.verified_phone_number, grafana_cloud=grafana_cloud)
if twilio_call.status and twilio_call.sid:
phone_call.status = TwilioCallStatuses.DETERMINANT.get(twilio_call.status, None)
phone_call.sid = twilio_call.sid
diff --git a/engine/apps/twilioapp/tests/test_phone_calls.py b/engine/apps/twilioapp/tests/test_phone_calls.py
index 22c64a3f..17ec3556 100644
--- a/engine/apps/twilioapp/tests/test_phone_calls.py
+++ b/engine/apps/twilioapp/tests/test_phone_calls.py
@@ -1,3 +1,4 @@
+import urllib
from unittest import mock
import pytest
@@ -11,6 +12,13 @@ from rest_framework.test import APIClient
from apps.base.models import UserNotificationPolicy
from apps.twilioapp.constants import TwilioCallStatuses
from apps.twilioapp.models import PhoneCall
+from apps.twilioapp.utils import get_gather_message
+
+
+class FakeTwilioCall:
+ def __init__(self):
+ self.sid = "123"
+ self.status = TwilioCallStatuses.COMPLETED
@pytest.fixture
@@ -268,3 +276,58 @@ def test_wrong_pressed_digit(mock_has_permission, mock_get_gather_url, phone_cal
assert response.status_code == 200
assert "Wrong digit" in content
+
+
+@mock.patch("apps.twilioapp.twilio_client.Client")
+@pytest.mark.django_db
+def test_make_cloud_phone_call_not_gathering_digit(mock_twilio_client, make_organization, make_user):
+ organization = make_organization()
+ user = make_user(organization=organization, _verified_phone_number="9999555")
+ mock_twilio_client.return_value.calls.create.return_value = FakeTwilioCall()
+
+ PhoneCall.make_grafana_cloud_call(user, "the message")
+
+ gather_message = urllib.parse.quote(get_gather_message())
+ assert gather_message not in mock_twilio_client.return_value.calls.create.call_args.kwargs["url"]
+
+
+@mock.patch("apps.twilioapp.twilio_client.Client")
+@pytest.mark.django_db
+def test_make_phone_call_gathering_digit(
+ mock_twilio_client,
+ make_organization,
+ make_user,
+ make_user_notification_policy,
+ make_alert_receive_channel,
+ make_alert_group,
+ make_alert,
+):
+ organization = make_organization()
+ user = make_user(organization=organization, _verified_phone_number="9999555")
+ alert_receive_channel = make_alert_receive_channel(organization)
+ alert_group = make_alert_group(alert_receive_channel)
+ notification_policy = make_user_notification_policy(
+ user=user,
+ step=UserNotificationPolicy.Step.NOTIFY,
+ notify_by=UserNotificationPolicy.NotificationChannel.PHONE_CALL,
+ )
+ make_alert(
+ alert_group,
+ raw_request_data={
+ "status": "firing",
+ "labels": {
+ "alertname": "TestAlert",
+ "region": "eu-1",
+ },
+ "annotations": {},
+ "startsAt": "2018-12-25T15:47:47.377363608Z",
+ "endsAt": "0001-01-01T00:00:00Z",
+ "generatorURL": "",
+ },
+ )
+ mock_twilio_client.return_value.calls.create.return_value = FakeTwilioCall()
+
+ PhoneCall.make_call(user, alert_group, notification_policy)
+
+ gather_message = urllib.parse.quote(get_gather_message())
+ assert gather_message in mock_twilio_client.return_value.calls.create.call_args.kwargs["url"]
diff --git a/engine/apps/twilioapp/twilio_client.py b/engine/apps/twilioapp/twilio_client.py
index b1e07e2a..007d9e72 100644
--- a/engine/apps/twilioapp/twilio_client.py
+++ b/engine/apps/twilioapp/twilio_client.py
@@ -126,19 +126,22 @@ class TwilioClient:
)
self.make_call(message=message, to=to)
- def make_call(self, message, to):
+ def make_call(self, message, to, grafana_cloud=False):
try:
start_message = message.replace('"', "")
- twiml_query = urllib.parse.quote(
+ gather_message = (
(
- f"