Merge remote-tracking branch 'origin/on-call-shifts-generate-ical-update' into new-schedules

This commit is contained in:
Maxim 2022-08-15 16:58:08 +03:00
commit dda55e8f2b
69 changed files with 1653 additions and 816 deletions

View file

@ -6,7 +6,7 @@ SLACK_API_TOKEN=
SLACK_API_TOKEN_COMMON=
SLACK_SLASH_COMMAND_NAME=/oncall
TELEGRAM_WEBHOOK_URL=
TELEGRAM_WEBHOOK_HOST=
TELEGRAM_TOKEN=
TWILIO_ACCOUNT_SID=

View file

@ -1,5 +1,9 @@
# Change Log
## v1.0.13 (2022-07-27)
- Optimize alert group list view
- Fix a bug related to Twilio setup
## v1.0.12 (2022-07-26)
- Update push-notifications dependency
- Rework how absolute URLs are built

View file

@ -38,7 +38,7 @@ GRAFANA_PASSWORD=admin" > .env_hobby
3. Launch services:
```bash
docker-compose --env-file .env_hobby -f docker-compose.yml up --build -d
docker-compose --env-file .env_hobby -f docker-compose.yml up -d
```
4. Issue one-time invite token:

View file

@ -18,9 +18,12 @@ class AlertBaseRenderer(ABC):
class AlertGroupBaseRenderer(ABC):
def __init__(self, alert_group):
def __init__(self, alert_group, alert=None):
if alert is None:
alert = alert_group.alerts.first()
self.alert_group = alert_group
self.alert_renderer = self.alert_renderer_class(self.alert_group.alerts.first())
self.alert_renderer = self.alert_renderer_class(alert)
@property
@abstractmethod

View file

@ -20,11 +20,11 @@ class AlertClassicMarkdownRenderer(AlertBaseRenderer):
class AlertGroupClassicMarkdownRenderer(AlertGroupBaseRenderer):
def __init__(self, alert_group):
super().__init__(alert_group)
def __init__(self, alert_group, alert=None):
if alert is None:
alert = alert_group.alerts.last()
# use the last alert to render content
self.alert_renderer = self.alert_renderer_class(self.alert_group.alerts.last())
super().__init__(alert_group, alert)
@property
def alert_renderer_class(self):

View file

@ -20,11 +20,11 @@ class AlertWebRenderer(AlertBaseRenderer):
class AlertGroupWebRenderer(AlertGroupBaseRenderer):
def __init__(self, alert_group):
super().__init__(alert_group)
def __init__(self, alert_group, alert=None):
if alert is None:
alert = alert_group.alerts.last()
# use the last alert to render content
self.alert_renderer = self.alert_renderer_class(self.alert_group.alerts.last())
super().__init__(alert_group, alert)
@property
def alert_renderer_class(self):

View file

@ -0,0 +1,17 @@
# Generated by Django 3.2.13 on 2022-07-27 10:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alerts', '0005_alertgroup_cached_render_for_web'),
]
operations = [
migrations.AddIndex(
model_name='alertgroup',
index=models.Index(fields=['channel_id', 'resolved', 'acknowledged', 'silenced', 'root_alert_group_id', 'is_archived'], name='alerts_aler_channel_ee84a7_idx'),
),
]

View file

@ -5,7 +5,7 @@ from uuid import uuid4
from django.apps import apps
from django.conf import settings
from django.core.validators import MinLengthValidator
from django.db import models, transaction
from django.db import models
from django.db.models import JSONField
from django.db.models.signals import post_save
@ -261,9 +261,6 @@ def listen_for_alert_model_save(sender, instance, created, *args, **kwargs):
else:
distribute_alert.apply_async((instance.pk,), countdown=TASK_DELAY_SECONDS)
logger.info(f"Recalculate AG cache. Reason: save alert model {instance.pk}")
transaction.on_commit(instance.group.schedule_cache_for_web)
# Connect signal to base Alert class
post_save.connect(listen_for_alert_model_save, Alert)

View file

@ -8,12 +8,9 @@ import pytz
from celery import uuid as celery_uuid
from django.apps import apps
from django.conf import settings
from django.core.cache import cache
from django.core.validators import MinLengthValidator
from django.db import IntegrityError, models, transaction
from django.db import IntegrityError, models
from django.db.models import JSONField, Q, QuerySet
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from django.utils.functional import cached_property
@ -22,16 +19,9 @@ from apps.alerts.incident_appearance.renderers.constants import DEFAULT_BACKUP_T
from apps.alerts.incident_appearance.renderers.slack_renderer import AlertGroupSlackRenderer
from apps.alerts.incident_log_builder import IncidentLogBuilder
from apps.alerts.signals import alert_group_action_triggered_signal
from apps.alerts.tasks import (
acknowledge_reminder_task,
call_ack_url,
schedule_cache_for_alert_group,
send_alert_group_signal,
unsilence_task,
)
from apps.alerts.tasks import acknowledge_reminder_task, call_ack_url, send_alert_group_signal, unsilence_task
from apps.slack.slack_formatter import SlackFormatter
from apps.user_management.models import User
from common.mixins.use_random_readonly_db_manager_mixin import UseRandomReadonlyDbManagerMixin
from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
from common.utils import clean_markup, str_or_backup
@ -108,10 +98,6 @@ class UnarchivedAlertGroupQuerySet(models.QuerySet):
return super().filter(*args, **kwargs, is_archived=False)
class AlertGroupManager(UseRandomReadonlyDbManagerMixin, models.Manager):
pass
class AlertGroupSlackRenderingMixin:
"""
Ideally this mixin should not exist. Instead of this instance of AlertGroupSlackRenderer should be created and used
@ -134,8 +120,8 @@ class AlertGroupSlackRenderingMixin:
class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.Model):
all_objects = AlertGroupManager.from_queryset(AlertGroupQuerySet)()
unarchived_objects = AlertGroupManager.from_queryset(UnarchivedAlertGroupQuerySet)()
all_objects = AlertGroupQuerySet.as_manager()
unarchived_objects = UnarchivedAlertGroupQuerySet.as_manager()
(
NEW,
@ -242,8 +228,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
active_escalation_id = models.CharField(max_length=100, null=True, default=None) # ID generated by celery
active_resolve_calculation_id = models.CharField(max_length=100, null=True, default=None) # ID generated by celery
# ID generated by celery
active_cache_for_web_calculation_id = models.CharField(max_length=100, null=True, default=None)
SILENCE_DELAY_OPTIONS = (
(1800, "30 minutes"),
@ -315,7 +299,9 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
related_name="dependent_alert_groups",
)
cached_render_for_web = JSONField(default=dict)
# cached_render_for_web and active_cache_for_web_calculation_id are deprecated
cached_render_for_web = models.JSONField(default=dict)
active_cache_for_web_calculation_id = models.CharField(max_length=100, null=True, default=None)
last_unique_unacknowledge_process_id = models.CharField(max_length=100, null=True, default=None)
is_archived = models.BooleanField(default=False)
@ -364,6 +350,11 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
"distinction",
"is_open_for_grouping",
]
indexes = [
models.Index(
fields=["channel_id", "resolved", "acknowledged", "silenced", "root_alert_group_id", "is_archived"]
),
]
def __str__(self):
return f"{self.pk}: {self.verbose_name}"
@ -404,18 +395,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
def is_alert_a_resolve_signal(self, alert):
raise NotImplementedError
def cache_for_web(self, organization):
from apps.api.serializers.alert_group import AlertGroupSerializer
# Re-take object to switch connection from readonly db to master.
_self = AlertGroup.all_objects.get(pk=self.pk)
_self.cached_render_for_web = AlertGroupSerializer(self, context={"organization": organization}).data
self.cached_render_for_web = _self.cached_render_for_web
_self.save(update_fields=["cached_render_for_web"])
def schedule_cache_for_web(self):
schedule_cache_for_alert_group.apply_async((self.pk,))
@property
def permalink(self):
if self.slack_message is not None:
@ -425,10 +404,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
def web_link(self):
return urljoin(self.channel.organization.web_link, f"?page=incident&id={self.public_primary_key}")
@property
def alerts_count(self):
return self.alerts.count()
@property
def happened_while_maintenance(self):
return self.root_alert_group is not None and self.root_alert_group.maintenance_uuid is not None
@ -449,10 +424,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
self.unresolve()
self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_RESOLVED, author=user, reason="Acknowledge button")
# clear resolve report cache
cache_key = "render_after_resolve_report_json_{}".format(self.pk)
cache.delete(cache_key)
self.acknowledge(acknowledged_by_user=user, acknowledged_by=AlertGroup.USER)
self.stop_escalation()
if self.is_root_alert_group:
@ -673,9 +644,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
self.unresolve()
log_record = self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_RESOLVED, author=user)
# clear resolve report cache
self.drop_cached_after_resolve_report_json()
if self.is_root_alert_group:
self.start_escalation_if_needed()
@ -848,10 +816,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
self.unresolve()
self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_RESOLVED, author=user, reason="Silence button")
# clear resolve report cache
cache_key = "render_after_resolve_report_json_{}".format(self.pk)
cache.delete(cache_key)
if self.acknowledged:
self.unacknowledge()
self.log_records.create(type=AlertGroupLogRecord.TYPE_UN_ACK, author=user, reason="Silence button")
@ -1060,8 +1024,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
author=user,
reason="Bulk action acknowledge",
)
# clear resolve report cache
alert_group.drop_cached_after_resolve_report_json()
for alert_group in alert_groups_to_unsilence_before_acknowledge_list:
alert_group.log_records.create(
@ -1194,8 +1156,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
reason="Bulk action restart",
)
alert_group.drop_cached_after_resolve_report_json()
if alert_group.is_root_alert_group:
alert_group.start_escalation_if_needed()
@ -1293,7 +1253,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
author=user,
reason="Bulk action silence",
)
alert_group.drop_cached_after_resolve_report_json()
for alert_group in alert_groups_to_unsilence_before_silence_list:
alert_group.log_records.create(
@ -1483,7 +1442,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
else:
return "Acknowledged"
def non_cached_after_resolve_report_json(self):
def render_after_resolve_report_json(self):
AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord")
UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord")
ResolutionNote = apps.get_model("alerts", "ResolutionNote")
@ -1501,21 +1460,6 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
result_log_report.append(log_record.render_log_line_json())
return result_log_report
def render_after_resolve_report_json(self):
cache_key = "render_after_resolve_report_json_{}".format(self.pk)
# cache.get_or_set in some cases returns None, so use get and set cache methods separately
log_report = cache.get(cache_key)
if log_report is None:
log_report = self.non_cached_after_resolve_report_json()
cache.set(cache_key, log_report)
return log_report
def drop_cached_after_resolve_report_json(self):
cache_key = "render_after_resolve_report_json_{}".format(self.pk)
if cache_key in cache:
cache.delete(cache_key)
@property
def has_resolution_notes(self):
return self.resolution_notes.exists()
@ -1595,14 +1539,3 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
)
return stop_escalation_log
@receiver(post_save, sender=AlertGroup)
def listen_for_alert_group_model_save(sender, instance, created, *args, **kwargs):
if (
kwargs is not None
and "update_fields" in kwargs
and kwargs["update_fields"] is dict
and "cached_render_for_web" not in kwargs["update_fields"]
):
transaction.on_commit(instance.schedule_cache_for_alert_group)

View file

@ -3,7 +3,7 @@ import logging
import humanize
from django.apps import apps
from django.db import models, transaction
from django.db import models
from django.db.models import JSONField
from django.db.models.signals import post_save
from django.dispatch import receiver
@ -546,7 +546,6 @@ class AlertGroupLogRecord(models.Model):
@receiver(post_save, sender=AlertGroupLogRecord)
def listen_for_alertgrouplogrecord(sender, instance, created, *args, **kwargs):
instance.alert_group.drop_cached_after_resolve_report_json()
if instance.type != AlertGroupLogRecord.TYPE_DELETED:
if not instance.alert_group.is_maintenance_incident:
alert_group_pk = instance.alert_group.pk
@ -555,6 +554,3 @@ def listen_for_alertgrouplogrecord(sender, instance, created, *args, **kwargs):
f"alert group event: {instance.get_type_display()}"
)
send_update_log_report_signal.apply_async(kwargs={"alert_group_pk": alert_group_pk}, countdown=8)
logger.info(f"Recalculate AG cache. Reason: save alert_group_log_record model {instance.pk}")
transaction.on_commit(instance.alert_group.schedule_cache_for_web)

View file

@ -19,11 +19,7 @@ from jinja2 import Template
from apps.alerts.grafana_alerting_sync_manager.grafana_alerting_sync import GrafanaAlertingSyncManager
from apps.alerts.integration_options_mixin import IntegrationOptionsMixin
from apps.alerts.models.maintainable_object import MaintainableObject
from apps.alerts.tasks import (
disable_maintenance,
invalidate_web_cache_for_alert_group,
sync_grafana_alerting_contact_points,
)
from apps.alerts.tasks import disable_maintenance, sync_grafana_alerting_contact_points
from apps.base.messaging import get_messaging_backend_from_id
from apps.base.utils import live_settings
from apps.integrations.metadata import heartbeat
@ -693,21 +689,6 @@ def listen_for_alertreceivechannel_model_save(sender, instance, created, *args,
create_organization_log(
instance.organization, None, OrganizationLogType.TYPE_HEARTBEAT_CREATED, description
)
else:
if kwargs is not None:
if "update_fields" in kwargs:
if kwargs["update_fields"] is not None:
fields_to_not_to_invalidate_cache = [
"rate_limit_message_task_id",
"rate_limited_in_slack_at",
"reason_to_skip_escalation",
]
# Hack to not to invalidate web cache on AlertReceiveChannel.start_send_rate_limit_message_task
for f in fields_to_not_to_invalidate_cache:
if f in kwargs["update_fields"]:
return
logger.info(f"Drop AG cache. Reason: save alert_receive_channel {instance.pk}")
invalidate_web_cache_for_alert_group.apply_async(kwargs={"channel_pk": instance.pk})
if instance.integration == AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING:
if created:

View file

@ -9,7 +9,7 @@ from .custom_button_result import custom_button_result # noqa: F401
from .delete_alert_group import delete_alert_group # noqa: F401
from .distribute_alert import distribute_alert # noqa: F401
from .escalate_alert_group import escalate_alert_group # noqa: F401
from .invalidate_web_cache_for_alert_group import invalidate_web_cache_for_alert_group # noqa: F401
from .invalidate_web_cache_for_alert_group import invalidate_web_cache_for_alert_group # noqa: F401, todo: remove
from .invite_user_to_join_incident import invite_user_to_join_incident # noqa: F401
from .maintenance import disable_maintenance # noqa: F401
from .notify_all import notify_all_task # noqa: F401

View file

@ -1,54 +1,19 @@
from celery.utils.log import get_task_logger
from django.apps import apps
from django.conf import settings
from django.core.cache import cache
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
logger = get_task_logger(__name__)
def get_cache_key_caching_alert_group_for_web(alert_group_pk):
CACHE_KEY_PREFIX = "cache_alert_group_for_web"
return f"{CACHE_KEY_PREFIX}_{alert_group_pk}"
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
)
def schedule_cache_for_alert_group(alert_group_pk):
CACHE_FOR_ALERT_GROUP_LIFETIME = 60
START_CACHE_DELAY = 5 # we introduce delay to avoid recaching after each alert.
task = cache_alert_group_for_web.apply_async(args=[alert_group_pk], countdown=START_CACHE_DELAY)
cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
cache.set(cache_key, task.id, timeout=CACHE_FOR_ALERT_GROUP_LIFETIME)
# todo: remove
pass
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
)
def cache_alert_group_for_web(alert_group_pk):
"""
Async task to re-cache alert_group for web.
"""
cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
cached_task_id = cache.get(cache_key)
current_task_id = cache_alert_group_for_web.request.id
if cached_task_id is None:
return (
f"cache_alert_group_for_web skipped, because of current task_id ({current_task_id})"
f" for alert_group {alert_group_pk} doesn't exist in cache, which means this task is not"
f" relevant: cache was dropped by engine restart ot CACHE_FOR_ALERT_GROUP_LIFETIME"
)
if not current_task_id == cached_task_id or cached_task_id is None:
return (
f"cache_alert_group_for_web skipped, because of current task_id ({current_task_id})"
f" doesn't equal to cached task_id ({cached_task_id}) for alert_group {alert_group_pk},"
)
else:
AlertGroup = apps.get_model("alerts", "AlertGroup")
alert_group = AlertGroup.all_objects.using_readonly_db.get(pk=alert_group_pk)
alert_group.cache_for_web(alert_group.channel.organization)
logger.info(f"cache_alert_group_for_web: cache refreshed for alert_group {alert_group_pk}")
# todo: remove
pass

View file

@ -1,32 +1,11 @@
from django.apps import apps
from django.conf import settings
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
from .task_logger import task_logger
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
)
def invalidate_web_cache_for_alert_group(org_pk=None, channel_pk=None, alert_group_pk=None, alert_group_pks=None):
AlertGroup = apps.get_model("alerts", "AlertGroup")
DynamicSetting = apps.get_model("base", "DynamicSetting")
if channel_pk:
task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - alert_receive_channel {channel_pk}")
q = AlertGroup.all_objects.filter(channel__pk=channel_pk)
elif org_pk:
task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - organization {org_pk}")
q = AlertGroup.all_objects.filter(channel__organization__pk=org_pk)
elif alert_group_pk:
task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - alert_group {alert_group_pk}")
q = AlertGroup.all_objects.filter(pk=alert_group_pk)
elif alert_group_pks:
task_logger.debug(f"invalidate_web_cache_for_alert_group: Reason - alert_groups {alert_group_pks}")
q = AlertGroup.all_objects.filter(pk__in=alert_group_pks)
skip_task = DynamicSetting.objects.get_or_create(name="skip_invalidate_web_cache_for_alert_group")[0]
if skip_task.boolean_value:
return "Task has been skipped because of skip_invalidate_web_cache_for_alert_group DynamicSetting"
q.update(cached_render_for_web={})
# todo: remove
pass

View file

@ -386,9 +386,7 @@ def perform_notification(log_record_pk):
"status": f"{alert_group.status}",
"aps": {
"alert": f"Critical page: {message}",
# This is disabled until we gain the Critical Alerts Api permission from apple
# "interruption-level": "critical",
"interruption-level": "time-sensitive",
"interruption-level": "critical",
"sound": "ambulance.aiff",
},
},

View file

@ -138,7 +138,7 @@ def test_render_terraform_imports(
result = renderer.render_state()
expected_result = rendered_terraform_imports_template.format(
escalation_chain_name=escalation_chain.name,
escalation_chain_name=slugify(escalation_chain.name),
escalation_chain_public_primary_key=escalation_chain.public_primary_key,
integration_name=slugify(integration.verbal_name),
integration_public_primary_key=integration.public_primary_key,

View file

@ -6,6 +6,8 @@ from urllib.parse import urlparse
import requests
from apps.base.utils import live_settings
OUTGOING_WEBHOOK_TIMEOUT = 10
@ -52,13 +54,15 @@ def request_outgoing_webhook(webhook_url, http_request_type, post_kwargs={}) ->
return False, "Malformed url"
if not parsed_url.netloc:
return False, "Malformed url"
# Get the ip address of the webhook url and check if it belongs to the private network
try:
webhook_url_ip_address = socket.gethostbyname(parsed_url.netloc)
except socket.gaierror:
return False, "Cannot resolve name in url"
if ipaddress.ip_address(socket.gethostbyname(webhook_url_ip_address)).is_private:
return False, "This url is not supported for outgoing webhooks"
if not live_settings.DANGEROUS_WEBHOOKS_ENABLED:
# Get the ip address of the webhook url and check if it belongs to the private network
try:
webhook_url_ip_address = socket.gethostbyname(parsed_url.netloc)
except socket.gaierror:
return False, "Cannot resolve name in url"
if not live_settings.DANGEROUS_WEBHOOKS_ENABLED:
if ipaddress.ip_address(socket.gethostbyname(webhook_url_ip_address)).is_private:
return False, "This url is not supported for outgoing webhooks"
try:
if http_request_type == "POST":

View file

@ -1,7 +1,5 @@
import logging
from datetime import datetime
import humanize
from rest_framework import serializers
from apps.alerts.incident_appearance.renderers.classic_markdown_renderer import AlertGroupClassicMarkdownRenderer
@ -29,51 +27,31 @@ class ShortAlertGroupSerializer(serializers.ModelSerializer):
return AlertGroupWebRenderer(obj).render()
class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
"""
Attention: It's heavily cached. Make sure to invalidate alertgroup's web cache if you update the format!
"""
class AlertGroupListSerializer(EagerLoadingMixin, serializers.ModelSerializer):
pk = serializers.CharField(read_only=True, source="public_primary_key")
alert_receive_channel = FastAlertReceiveChannelSerializer(source="channel")
alerts = serializers.SerializerMethodField("get_limited_alerts")
resolved_by_verbose = serializers.CharField(source="get_resolved_by_display")
status = serializers.ReadOnlyField()
resolved_by_user = FastUserSerializer(required=False)
acknowledged_by_user = FastUserSerializer(required=False)
silenced_by_user = FastUserSerializer(required=False)
related_users = serializers.SerializerMethodField()
last_alert_at = serializers.SerializerMethodField()
started_at_verbose = serializers.SerializerMethodField()
acknowledged_at_verbose = serializers.SerializerMethodField()
resolved_at_verbose = serializers.SerializerMethodField()
silenced_at_verbose = serializers.SerializerMethodField()
dependent_alert_groups = ShortAlertGroupSerializer(many=True)
root_alert_group = ShortAlertGroupSerializer()
alerts_count = serializers.ReadOnlyField()
status = serializers.ReadOnlyField()
alerts_count = serializers.IntegerField(read_only=True)
render_for_web = serializers.SerializerMethodField()
render_for_classic_markdown = serializers.SerializerMethodField()
PREFETCH_RELATED = [
"alerts",
"dependent_alert_groups",
"log_records",
"log_records__author",
"log_records__escalation_policy",
"log_records__invitation__invitee",
]
SELECT_RELATED = [
"slack_message",
"channel__organization",
"slack_message___slack_team_identity",
"acknowledged_by_user",
"root_alert_group",
"resolved_by_user",
"acknowledged_by_user",
"silenced_by_user",
]
@ -87,7 +65,6 @@ class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
"alert_receive_channel",
"resolved",
"resolved_by",
"resolved_by_verbose",
"resolved_by_user",
"resolved_at",
"acknowledged_at",
@ -98,48 +75,30 @@ class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
"silenced",
"silenced_by_user",
"silenced_at",
"silenced_at_verbose",
"silenced_until",
"started_at",
"last_alert_at",
"silenced_until",
"permalink",
"alerts",
"related_users",
"started_at_verbose",
"acknowledged_at_verbose",
"resolved_at_verbose",
"render_for_web",
"render_after_resolve_report_json",
"render_for_classic_markdown",
"dependent_alert_groups",
"root_alert_group",
"status",
]
def get_last_alert_at(self, obj):
last_alert = obj.alerts.last()
# TODO: This is a Hotfix for 0.0.27
if last_alert is None:
logger.warning(f"obj {obj} doesn't have last_alert!")
return ""
return str(last_alert.created_at)
def get_render_for_web(self, obj):
# alert group has no alerts
if not obj.last_alert:
return {}
def get_limited_alerts(self, obj):
"""
Overriding default alerts because there are alert_groups with thousands of them.
It's just too slow, we need to cut here.
"""
alerts = obj.alerts.all()[:100]
if len(alerts) > 90:
for alert in alerts:
alert.title = str(alert.title) + " Only last 100 alerts are shown. Use Amixr API to fetch all of them."
return AlertSerializer(alerts, many=True).data
return AlertGroupWebRenderer(obj, obj.last_alert).render()
def get_render_for_classic_markdown(self, obj):
return AlertGroupClassicMarkdownRenderer(obj).render()
# alert group has no alerts
if not obj.last_alert:
return {}
return AlertGroupClassicMarkdownRenderer(obj, obj.last_alert).render()
def get_related_users(self, obj):
users_ids = set()
@ -165,37 +124,44 @@ class AlertGroupSerializer(EagerLoadingMixin, serializers.ModelSerializer):
users_ids.add(log_record.author.public_primary_key)
return users
def get_started_at_verbose(self, obj):
started_at_verbose = None
if obj.started_at is not None:
started_at_verbose = humanize.naturaltime(
datetime.now().replace(tzinfo=None) - obj.started_at.replace(tzinfo=None)
)
return started_at_verbose
def get_acknowledged_at_verbose(self, obj):
acknowledged_at_verbose = None
if obj.acknowledged_at is not None:
acknowledged_at_verbose = humanize.naturaltime(
datetime.now().replace(tzinfo=None) - obj.acknowledged_at.replace(tzinfo=None)
) # TODO: Deal with timezones
return acknowledged_at_verbose
class AlertGroupSerializer(AlertGroupListSerializer):
alerts = serializers.SerializerMethodField("get_limited_alerts")
last_alert_at = serializers.SerializerMethodField()
def get_resolved_at_verbose(self, obj):
resolved_at_verbose = None
if obj.resolved_at is not None:
resolved_at_verbose = humanize.naturaltime(
datetime.now().replace(tzinfo=None) - obj.resolved_at.replace(tzinfo=None)
) # TODO: Deal with timezones
return resolved_at_verbose
def get_silenced_at_verbose(self, obj):
silenced_at_verbose = None
if obj.silenced_at is not None:
silenced_at_verbose = humanize.naturaltime(
datetime.now().replace(tzinfo=None) - obj.silenced_at.replace(tzinfo=None)
) # TODO: Deal with timezones
return silenced_at_verbose
class Meta(AlertGroupListSerializer.Meta):
fields = AlertGroupListSerializer.Meta.fields + [
"alerts",
"render_after_resolve_report_json",
"permalink",
"last_alert_at",
]
def get_render_for_web(self, obj):
# alert group has no alerts
alert = obj.alerts.last()
if not alert:
return {}
return AlertGroupWebRenderer(obj).render()
def get_last_alert_at(self, obj):
last_alert = obj.alerts.last()
if not last_alert:
return obj.started_at
return last_alert.created_at
def get_limited_alerts(self, obj):
"""
Overriding default alerts because there are alert_groups with thousands of them.
It's just too slow, we need to cut here.
"""
alerts = obj.alerts.all()[:100]
if len(alerts) > 90:
for alert in alerts:
alert.title = str(alert.title) + " Only last 100 alerts are shown. Use OnCall API to fetch all of them."
return AlertSerializer(alerts, many=True).data

View file

@ -1,7 +1,6 @@
from rest_framework import serializers
from apps.alerts.models import AlertGroup, ResolutionNote
from apps.alerts.tasks import invalidate_web_cache_for_alert_group
from apps.api.serializers.user import FastUserSerializer
from common.api_helpers.custom_fields import OrganizationFilteredPrimaryKeyRelatedField
from common.api_helpers.exceptions import BadRequest
@ -39,9 +38,6 @@ class ResolutionNoteSerializer(EagerLoadingMixin, serializers.ModelSerializer):
validated_data["author"] = self.context["request"].user
validated_data["source"] = ResolutionNote.Source.WEB
created_instance = super().create(validated_data)
# Invalidate alert group cache because resolution notes shown in alert group's timeline
created_instance.alert_group.drop_cached_after_resolve_report_json()
invalidate_web_cache_for_alert_group(alert_group_pk=created_instance.alert_group.pk)
return created_instance
def to_representation(self, instance):
@ -57,8 +53,5 @@ class ResolutionNoteUpdateSerializer(ResolutionNoteSerializer):
def update(self, instance, validated_data):
if instance.source != ResolutionNote.Source.WEB:
raise BadRequest(detail="Cannot update message with this source type")
updated_instance = super().update(instance, validated_data)
# Invalidate alert group cache because resolution notes shown in alert group's timeline
updated_instance.alert_group.drop_cached_after_resolve_report_json()
invalidate_web_cache_for_alert_group(alert_group_pk=updated_instance.alert_group.pk)
return updated_instance
return super().update(instance, validated_data)

View file

@ -1,55 +0,0 @@
from celery.utils.log import get_task_logger
from django.apps import apps
from django.conf import settings
from django.core.cache import cache
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
logger = get_task_logger(__name__)
def get_cache_key_caching_alert_group_for_web(alert_group_pk):
CACHE_KEY_PREFIX = "cache_alert_group_for_web"
return f"{CACHE_KEY_PREFIX}_{alert_group_pk}"
# TODO: remove this tasks after all of them will be processed in prod
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
)
def schedule_cache_for_alert_group(alert_group_pk):
CACHE_FOR_ALERT_GROUP_LIFETIME = 60
START_CACHE_DELAY = 5 # we introduce delay to avoid recaching after each alert.
task = cache_alert_group_for_web.apply_async(args=[alert_group_pk], countdown=START_CACHE_DELAY)
cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
cache.set(cache_key, task.id, timeout=CACHE_FOR_ALERT_GROUP_LIFETIME)
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
)
def cache_alert_group_for_web(alert_group_pk):
"""
Async task to re-cache alert_group for web.
"""
cache_key = get_cache_key_caching_alert_group_for_web(alert_group_pk)
cached_task_id = cache.get(cache_key)
current_task_id = cache_alert_group_for_web.request.id
if cached_task_id is None:
return (
f"cache_alert_group_for_web skipped, because of current task_id ({current_task_id})"
f" for alert_group {alert_group_pk} doesn't exist in cache, which means this task is not"
f" relevant: cache was dropped by engine restart ot CACHE_FOR_ALERT_GROUP_LIFETIME"
)
if not current_task_id == cached_task_id or cached_task_id is None:
return (
f"cache_alert_group_for_web skipped, because of current task_id ({current_task_id})"
f" doesn't equal to cached task_id ({cached_task_id}) for alert_group {alert_group_pk},"
)
else:
AlertGroup = apps.get_model("alerts", "AlertGroup")
alert_group = AlertGroup.all_objects.using_readonly_db.get(pk=alert_group_pk)
alert_group.cache_for_web(alert_group.channel.organization)
logger.info(f"cache_alert_group_for_web: cache refreshed for alert_group {alert_group_pk}")

View file

@ -63,7 +63,7 @@ def test_get_filter_started_at(alert_group_internal_api_setup, make_user_auth_he
)
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 4
assert len(response.data["results"]) == 4
@pytest.mark.django_db
@ -78,7 +78,7 @@ def test_get_filter_resolved_at_alertgroup_empty_result(alert_group_internal_api
**make_user_auth_headers(user, token),
)
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 0
assert len(response.data["results"]) == 0
@pytest.mark.django_db
@ -105,7 +105,7 @@ def test_get_filter_resolved_at(alert_group_internal_api_setup, make_user_auth_h
**make_user_auth_headers(user, token),
)
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 1
assert len(response.data["results"]) == 1
@pytest.mark.django_db
@ -117,7 +117,7 @@ def test_status_new(alert_group_internal_api_setup, make_user_auth_headers):
url = reverse("api-internal:alertgroup-list")
response = client.get(url + "?status=0", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 1
assert len(response.data["results"]) == 1
assert response.data["results"][0]["pk"] == new_alert_group.public_primary_key
@ -130,7 +130,7 @@ def test_status_ack(alert_group_internal_api_setup, make_user_auth_headers):
url = reverse("api-internal:alertgroup-list")
response = client.get(url + "?status=1", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 1
assert len(response.data["results"]) == 1
assert response.data["results"][0]["pk"] == ack_alert_group.public_primary_key
@ -143,7 +143,7 @@ def test_status_resolved(alert_group_internal_api_setup, make_user_auth_headers)
url = reverse("api-internal:alertgroup-list")
response = client.get(url + "?status=2", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 1
assert len(response.data["results"]) == 1
assert response.data["results"][0]["pk"] == resolved_alert_group.public_primary_key
@ -156,7 +156,7 @@ def test_status_silenced(alert_group_internal_api_setup, make_user_auth_headers)
url = reverse("api-internal:alertgroup-list")
response = client.get(url + "?status=3", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 1
assert len(response.data["results"]) == 1
assert response.data["results"][0]["pk"] == silenced_alert_group.public_primary_key
@ -171,7 +171,7 @@ def test_all_statuses(alert_group_internal_api_setup, make_user_auth_headers):
url + "?status=0&status=1&&status=2&status=3", format="json", **make_user_auth_headers(user, token)
)
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 4
assert len(response.data["results"]) == 4
@pytest.mark.django_db
@ -213,7 +213,7 @@ def test_get_filter_resolved_by(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
assert first_response.data["count"] == 1
assert len(first_response.data["results"]) == 1
second_response = client.get(
url + f"?resolved_by={second_user.public_primary_key}",
@ -221,7 +221,7 @@ def test_get_filter_resolved_by(
**make_user_auth_headers(first_user, token),
)
assert second_response.status_code == status.HTTP_200_OK
assert second_response.data["count"] == 0
assert len(second_response.data["results"]) == 0
@pytest.mark.django_db
@ -269,7 +269,7 @@ def test_get_filter_resolved_by_multiple_values(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
assert first_response.data["count"] == 2
assert len(first_response.data["results"]) == 2
@pytest.mark.django_db
@ -309,7 +309,7 @@ def test_get_filter_acknowledged_by(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
assert first_response.data["count"] == 1
assert len(first_response.data["results"]) == 1
second_response = client.get(
url + f"?acknowledged_by={second_user.public_primary_key}",
@ -317,7 +317,7 @@ def test_get_filter_acknowledged_by(
**make_user_auth_headers(first_user, token),
)
assert second_response.status_code == status.HTTP_200_OK
assert second_response.data["count"] == 0
assert len(second_response.data["results"]) == 0
@pytest.mark.django_db
@ -363,7 +363,7 @@ def test_get_filter_acknowledged_by_multiple_values(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
assert first_response.data["count"] == 2
assert len(first_response.data["results"]) == 2
@pytest.mark.django_db
@ -402,7 +402,7 @@ def test_get_filter_silenced_by(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
assert first_response.data["count"] == 1
assert len(first_response.data["results"]) == 1
second_response = client.get(
url + f"?silenced_by={second_user.public_primary_key}",
@ -410,7 +410,7 @@ def test_get_filter_silenced_by(
**make_user_auth_headers(first_user, token),
)
assert second_response.status_code == status.HTTP_200_OK
assert second_response.data["count"] == 0
assert len(second_response.data["results"]) == 0
@pytest.mark.django_db
@ -455,7 +455,7 @@ def test_get_filter_silenced_by_multiple_values(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
assert first_response.data["count"] == 2
assert len(first_response.data["results"]) == 2
@pytest.mark.django_db
@ -494,7 +494,7 @@ def test_get_filter_invitees_are(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
assert first_response.data["count"] == 1
assert len(first_response.data["results"]) == 1
second_response = client.get(
url + f"?invitees_are={second_user.public_primary_key}",
@ -502,7 +502,7 @@ def test_get_filter_invitees_are(
**make_user_auth_headers(first_user, token),
)
assert second_response.status_code == status.HTTP_200_OK
assert second_response.data["count"] == 0
assert len(second_response.data["results"]) == 0
@pytest.mark.django_db
@ -548,7 +548,7 @@ def test_get_filter_invitees_are_multiple_values(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
assert first_response.data["count"] == 2
assert len(first_response.data["results"]) == 2
@pytest.mark.django_db
@ -593,7 +593,7 @@ def test_get_filter_invitees_are_ag_with_multiple_logs(
**make_user_auth_headers(first_user, token),
)
assert first_response.status_code == status.HTTP_200_OK
assert first_response.data["count"] == 1
assert len(first_response.data["results"]) == 1
@pytest.mark.django_db
@ -611,11 +611,11 @@ def test_get_filter_with_resolution_note(
# there are no alert groups with resolution_notes
response = client.get(url + "?with_resolution_note=true", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 0
assert len(response.data["results"]) == 0
response = client.get(url + "?with_resolution_note=false", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 4
assert len(response.data["results"]) == 4
# add resolution_notes to two of four alert groups
make_resolution_note(res_alert_group)
@ -623,11 +623,11 @@ def test_get_filter_with_resolution_note(
response = client.get(url + "?with_resolution_note=true", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 2
assert len(response.data["results"]) == 2
response = client.get(url + "?with_resolution_note=false", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 2
assert len(response.data["results"]) == 2
@pytest.mark.django_db
@ -653,7 +653,7 @@ def test_get_filter_with_resolution_note_after_delete_resolution_note(
response = client.get(url + "?with_resolution_note=true", format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 1
assert len(response.data["results"]) == 1
@pytest.mark.django_db

View file

@ -432,6 +432,7 @@ def test_events_calendar(
"calendar_type": OnCallSchedule.PRIMARY,
"is_empty": False,
"is_gap": False,
"is_override": False,
"shift": {
"pk": on_call_shift.public_primary_key,
},
@ -497,6 +498,7 @@ def test_filter_events_calendar(
"calendar_type": OnCallSchedule.PRIMARY,
"is_empty": False,
"is_gap": False,
"is_override": False,
"shift": {
"pk": on_call_shift.public_primary_key,
},
@ -512,6 +514,7 @@ def test_filter_events_calendar(
"calendar_type": OnCallSchedule.PRIMARY,
"is_empty": False,
"is_gap": False,
"is_override": False,
"shift": {
"pk": on_call_shift.public_primary_key,
},
@ -594,6 +597,7 @@ def test_filter_events_range_calendar(
"calendar_type": OnCallSchedule.PRIMARY,
"is_empty": False,
"is_gap": False,
"is_override": False,
"shift": {
"pk": on_call_shift.public_primary_key,
},
@ -675,6 +679,7 @@ def test_filter_events_overrides(
"calendar_type": OnCallSchedule.OVERRIDES,
"is_empty": False,
"is_gap": False,
"is_override": True,
"shift": {
"pk": override.public_primary_key,
},
@ -737,7 +742,7 @@ def test_filter_events_final_schedule(
# override: 22-23 / E
override_data = {
"start": start_date + timezone.timedelta(hours=22),
"rotation_start": start_date,
"rotation_start": start_date + timezone.timedelta(hours=22),
"duration": timezone.timedelta(hours=1),
"schedule": schedule,
}
@ -772,6 +777,7 @@ def test_filter_events_final_schedule(
"calendar_type": 1 if is_override else None if is_gap else 0,
"end": start_date + timezone.timedelta(hours=start + duration),
"is_gap": is_gap,
"is_override": is_override,
"priority_level": priority,
"start": start_date + timezone.timedelta(hours=start, milliseconds=1 if start == 0 else 0),
"user": user,
@ -783,6 +789,7 @@ def test_filter_events_final_schedule(
"calendar_type": e["calendar_type"],
"end": e["end"],
"is_gap": e["is_gap"],
"is_override": e["is_override"],
"priority_level": e["priority_level"],
"start": e["start"],
"user": e["users"][0]["display_name"] if e["users"] else None,
@ -792,6 +799,75 @@ def test_filter_events_final_schedule(
assert returned_events == expected_events
@pytest.mark.django_db
def test_next_shifts_per_user(
    make_organization_and_user_with_plugin_token,
    make_user_for_organization,
    make_user_auth_headers,
    make_schedule,
    make_on_call_shift,
):
    """
    The next-shifts-per-user endpoint returns, for each user in the schedule,
    the first upcoming event of the resolved (final) schedule.
    """
    organization, user, token = make_organization_and_user_with_plugin_token()
    client = APIClient()

    schedule = make_schedule(
        organization,
        schedule_class=OnCallScheduleWeb,
        name="test_web_schedule",
    )

    # start of tomorrow (midnight), so every shift defined below lies in the future
    tomorrow = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) + timezone.timedelta(days=1)
    user_a, user_b, user_c = (make_user_for_organization(organization, username=i) for i in "ABC")

    shifts = (
        # user, priority, start time (h), duration (hs)
        (user_a, 1, 8, 2),  # r1-1: 8-10 / A
        (user_a, 1, 15, 2),  # r1-2: 15-17 / A
        (user_b, 2, 7, 5),  # r2-1: 7-12 / B
        (user_b, 2, 16, 2),  # r2-2: 16-18 / B
        (user_c, 2, 18, 2),  # r2-3: 18-20 / C
    )
    # build one daily recurring rotation shift per row above
    for user, priority, start_h, duration in shifts:
        data = {
            "start": tomorrow + timezone.timedelta(hours=start_h),
            "rotation_start": tomorrow,
            "duration": timezone.timedelta(hours=duration),
            "priority_level": priority,
            "frequency": CustomOnCallShift.FREQUENCY_DAILY,
            "schedule": schedule,
        }
        on_call_shift = make_on_call_shift(
            organization=organization, shift_type=CustomOnCallShift.TYPE_RECURRENT_EVENT, **data
        )
        on_call_shift.users.add(user)

    # override: 17-18 / C
    override_data = {
        "start": tomorrow + timezone.timedelta(hours=17),
        "rotation_start": tomorrow + timezone.timedelta(hours=17),
        "duration": timezone.timedelta(hours=1),
        "schedule": schedule,
    }
    override = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_OVERRIDE, **override_data
    )
    override.add_rolling_users([[user_c]])

    # final schedule: 7-12: B, 15-16: A, 16-17: B, 17-18: C (override), 18-20: C
    url = reverse("api-internal:schedule-next-shifts-per-user", kwargs={"pk": schedule.public_primary_key})
    response = client.get(url, format="json", **make_user_auth_headers(user, token))
    assert response.status_code == status.HTTP_200_OK

    expected = {
        # each user maps to the (start, end) of their first upcoming final-schedule event
        user_a.public_primary_key: (tomorrow + timezone.timedelta(hours=15), tomorrow + timezone.timedelta(hours=16)),
        user_b.public_primary_key: (tomorrow + timezone.timedelta(hours=7), tomorrow + timezone.timedelta(hours=12)),
        user_c.public_primary_key: (tomorrow + timezone.timedelta(hours=17), tomorrow + timezone.timedelta(hours=18)),
    }
    returned_data = {u: (ev["start"], ev["end"]) for u, ev in response.data["users"].items()}
    assert returned_data == expected
@pytest.mark.django_db
def test_filter_events_invalid_type(
make_organization_and_user_with_plugin_token,

View file

@ -1,10 +1,6 @@
from datetime import datetime, timedelta
from datetime import timedelta
from django import forms
from django.db import models
from django.db.models import CharField, Q
from django.db.models.constants import LOOKUP_SEP
from django.db.models.functions import Cast
from django.db.models import Count, Max, Q
from django.utils import timezone
from django_filters import rest_framework as filters
from django_filters.widgets import RangeWidget
@ -15,16 +11,15 @@ from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from apps.alerts.constants import ActionSource
from apps.alerts.models import AlertGroup, AlertReceiveChannel
from apps.alerts.tasks import invalidate_web_cache_for_alert_group
from apps.alerts.models import Alert, AlertGroup, AlertReceiveChannel
from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdminOrEditor
from apps.api.serializers.alert_group import AlertGroupSerializer
from apps.api.serializers.alert_group import AlertGroupListSerializer, AlertGroupSerializer
from apps.auth_token.auth import MobileAppAuthTokenAuthentication, PluginAuthentication
from apps.user_management.models import User
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.filters import DateRangeFilterMixin, ModelFieldFilterMixin
from common.api_helpers.mixins import PreviewTemplateMixin, PublicPrimaryKeyMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
from common.api_helpers.paginators import TwentyFiveCursorPaginator
def get_integration_queryset(request):
@ -148,34 +143,6 @@ class AlertGroupFilter(DateRangeFilterMixin, ModelFieldFilterMixin, filters.Filt
return queryset
class CustomSearchFilter(SearchFilter):
    """SearchFilter variant that skips the ``distinct()`` requirement for annotated search fields."""

    def must_call_distinct(self, queryset, search_fields):
        """
        Return True if 'distinct()' should be used to query the given lookups.
        """
        for search_field in search_fields:
            opts = queryset.model._meta
            if search_field[0] in self.lookup_prefixes:
                # strip the DRF search prefix character (e.g. '^', '=', '@') before field resolution
                search_field = search_field[1:]
            # From https://github.com/encode/django-rest-framework/pull/6240/files#diff-01f357e474dd8fd702e4951b9227bffcR88
            # Annotated fields do not need to be distinct
            if isinstance(queryset, models.QuerySet) and search_field in queryset.query.annotations:
                continue
            parts = search_field.split(LOOKUP_SEP)
            for part in parts:
                field = opts.get_field(part)
                if hasattr(field, "get_path_info"):
                    # This field is a relation, update opts to follow the relation
                    path_info = field.get_path_info()
                    opts = path_info[-1].to_opts
                    if any(path.m2m for path in path_info):
                        # This field is a m2m relation so we know we need to call distinct
                        return True
        return False
class AlertGroupView(
PreviewTemplateMixin,
PublicPrimaryKeyMixin,
@ -216,90 +183,90 @@ class AlertGroupView(
serializer_class = AlertGroupSerializer
pagination_class = FiftyPageSizePaginator
pagination_class = TwentyFiveCursorPaginator
filter_backends = [CustomSearchFilter, filters.DjangoFilterBackend]
search_fields = ["cached_render_for_web_str"]
filter_backends = [SearchFilter, filters.DjangoFilterBackend]
# todo: add ability to search by templated title
search_fields = ["public_primary_key", "inside_organization_number"]
filterset_class = AlertGroupFilter
def list(self, request, *args, **kwargs):
"""
It's compute-heavy so we rely on cache here.
Attention: Make sure to invalidate cache if you update the format!
"""
queryset = self.filter_queryset(self.get_queryset(eager=False, readonly=True))
def get_serializer_class(self):
if self.action == "list":
return AlertGroupListSerializer
page = self.paginate_queryset(queryset)
skip_slow_rendering = request.query_params.get("skip_slow_rendering") == "true"
data = []
return super().get_serializer_class()
for alert_group in page:
if alert_group.cached_render_for_web == {}:
# We cannot give empty data to web. So caching synchronously here.
if skip_slow_rendering:
# We just return dummy data.
# Cache is not launched because after skip_slow_rendering request should come usual one
# which will start caching
data.append({"pk": alert_group.pk, "short": True})
else:
# Synchronously cache and return. It could be slow.
alert_group.cache_for_web(alert_group.channel.organization)
data.append(alert_group.cached_render_for_web)
else:
data.append(alert_group.cached_render_for_web)
if not skip_slow_rendering:
# Cache is not launched because after skip_slow_rendering request should come usual one
# which will start caching
alert_group.schedule_cache_for_web()
def get_queryset(self):
# no select_related or prefetch_related is used at this point, it will be done on paginate_queryset.
queryset = AlertGroup.unarchived_objects.filter(
channel__organization=self.request.auth.organization, channel__team=self.request.user.current_team
).only("id")
return self.get_paginated_response(data)
def get_queryset(self, eager=True, readonly=False, order=True):
if readonly:
queryset = AlertGroup.unarchived_objects.using_readonly_db
else:
queryset = AlertGroup.unarchived_objects
queryset = queryset.filter(
channel__organization=self.request.auth.organization,
channel__team=self.request.user.current_team,
)
if order:
queryset = queryset.order_by("-started_at")
queryset = queryset.annotate(cached_render_for_web_str=Cast("cached_render_for_web", output_field=CharField()))
if eager:
queryset = self.serializer_class.setup_eager_loading(queryset)
return queryset
def get_alert_groups_and_days_for_previous_same_period(self):
prev_alert_groups = AlertGroup.unarchived_objects.none()
delta_days = None
def paginate_queryset(self, queryset):
    """
    Paginate first, then enrich: the select_related/prefetch_related joins are applied
    only to the single page of 25 alert groups, not to the whole table.
    """
    page = super().paginate_queryset(queryset)
    return self.enrich(page)
started_at = self.request.query_params.get("started_at", None)
if started_at is not None:
started_at_gte, started_at_lte = AlertGroupFilter.parse_custom_datetime_range(started_at)
delta_days = None
if started_at_lte is not None:
started_at_lte = forms.DateTimeField().to_python(started_at_lte)
else:
started_at_lte = datetime.now()
def get_object(self):
    """Fetch a single alert group and enrich it (last_alert / alerts_count) like the list view does."""
    enriched = self.enrich([super().get_object()])
    return enriched[0]
if started_at_gte is not None:
started_at_gte = forms.DateTimeField().to_python(value=started_at_gte)
delta = started_at_lte.replace(tzinfo=None) - started_at_gte.replace(tzinfo=None)
prev_alert_groups = self.get_queryset().filter(
started_at__range=[started_at_gte - delta, started_at_gte]
)
delta_days = delta.days
return prev_alert_groups, delta_days
def enrich(self, alert_groups):
    """
    Perform select_related and prefetch_related (using setup_eager_loading) as well as in-memory
    joins to add additional info like alerts_count and last_alert for every alert group efficiently.
    The last_alert is needed because it's used by AlertGroupWebRenderer.

    Returns a new list of enriched AlertGroup instances (re-fetched with eager loading).
    """
    # enrich alert groups with select_related and prefetch_related
    alert_group_pks = [alert_group.pk for alert_group in alert_groups]
    queryset = AlertGroup.all_objects.filter(pk__in=alert_group_pks).order_by("-pk")
    # do not load cached_render_for_web as it's deprecated and can be very large
    queryset = queryset.defer("cached_render_for_web")
    queryset = self.get_serializer_class().setup_eager_loading(queryset)
    alert_groups = list(queryset)
    # pk -> alert group map, so linking alerts back below is O(1) per alert
    # instead of a linear scan over the page for every last alert
    alert_groups_by_pk = {alert_group.pk: alert_group for alert_group in alert_groups}

    # get info on alerts count and last alert ID for every alert group
    alerts_info = (
        Alert.objects.values("group_id")
        .filter(group_id__in=alert_group_pks)
        .annotate(alerts_count=Count("group_id"), last_alert_id=Max("id"))
    )
    alerts_info_map = {info["group_id"]: info for info in alerts_info}

    # fetch last alerts for every alert group
    last_alert_ids = [info["last_alert_id"] for info in alerts_info_map.values()]
    last_alerts = Alert.objects.filter(pk__in=last_alert_ids)
    for alert in last_alerts:
        # link group back to alert
        alert.group = alert_groups_by_pk[alert.group_id]
        alerts_info_map[alert.group_id].update({"last_alert": alert})

    # add additional "alerts_count" and "last_alert" fields to every alert group
    for alert_group in alert_groups:
        info = alerts_info_map.get(alert_group.pk)
        if info is None:
            # alert group has no alerts
            alert_group.last_alert = None
            alert_group.alerts_count = 0
        else:
            alert_group.last_alert = info["last_alert"]
            alert_group.alerts_count = info["alerts_count"]
    return alert_groups
@action(detail=False)
def stats(self, *args, **kwargs):
alert_groups = self.filter_queryset(self.get_queryset(eager=False))
alert_groups = self.filter_queryset(self.get_queryset())
# Only the count field is used; the other fields are left just in case, for backward compatibility
return Response(
{
@ -324,7 +291,6 @@ class AlertGroupView(
if alert_group.root_alert_group is not None:
raise BadRequest(detail="Can't acknowledge an attached alert group")
alert_group.acknowledge_by_user(self.request.user, action_source=ActionSource.WEB)
invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@ -344,7 +310,6 @@ class AlertGroupView(
raise BadRequest(detail="Can't unacknowledge a resolved alert group")
alert_group.un_acknowledge_by_user(self.request.user, action_source=ActionSource.WEB)
invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@ -365,7 +330,6 @@ class AlertGroupView(
status=status.HTTP_400_BAD_REQUEST,
)
alert_group.resolve_by_user(self.request.user, action_source=ActionSource.WEB)
invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@action(methods=["post"], detail=True)
@ -381,7 +345,6 @@ class AlertGroupView(
raise BadRequest(detail="The alert group is not resolved")
alert_group.un_resolve_by_user(self.request.user, action_source=ActionSource.WEB)
invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@action(methods=["post"], detail=True)
@ -404,8 +367,6 @@ class AlertGroupView(
return Response(status=status.HTTP_400_BAD_REQUEST)
alert_group.attach_by_user(self.request.user, root_alert_group, action_source=ActionSource.WEB)
invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
invalidate_web_cache_for_alert_group(alert_group_pk=root_alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@action(methods=["post"], detail=True)
@ -415,10 +376,8 @@ class AlertGroupView(
raise BadRequest(detail="Can't unattach maintenance alert group")
if alert_group.is_root_alert_group:
raise BadRequest(detail="Can't unattach an alert group because it is not attached")
root_alert_group_pk = alert_group.root_alert_group_id
alert_group.un_attach_by_user(self.request.user, action_source=ActionSource.WEB)
invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
invalidate_web_cache_for_alert_group(alert_group_pk=root_alert_group_pk)
return Response(AlertGroupSerializer(alert_group, context={"request": self.request}).data)
@action(methods=["post"], detail=True)
@ -433,7 +392,6 @@ class AlertGroupView(
raise BadRequest(detail="Can't silence an attached alert group")
alert_group.silence_by_user(request.user, silence_delay=delay, action_source=ActionSource.WEB)
invalidate_web_cache_for_alert_group(alert_group_pk=alert_group.pk)
return Response(AlertGroupSerializer(alert_group, context={"request": request}).data)
@action(methods=["get"], detail=False)
@ -548,9 +506,9 @@ class AlertGroupView(
raise BadRequest(detail="Please specify a delay for silence")
kwargs["silence_delay"] = delay
alert_groups = self.get_queryset(eager=False).filter(public_primary_key__in=alert_group_public_pks)
alert_group_pks = list(alert_groups.values_list("id", flat=True))
invalidate_web_cache_for_alert_group(alert_group_pks=alert_group_pks)
alert_groups = AlertGroup.unarchived_objects.filter(
channel__organization=self.request.auth.organization, public_primary_key__in=alert_group_public_pks
)
kwargs["user"] = self.request.user
kwargs["alert_groups"] = alert_groups

View file

@ -1,7 +1,6 @@
from contextlib import suppress
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from rest_framework import status, viewsets
from rest_framework.permissions import IsAuthenticated
@ -11,7 +10,7 @@ from apps.api.permissions import IsAdmin
from apps.api.serializers.live_setting import LiveSettingSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.base.models import LiveSetting
from apps.base.utils import live_settings
from apps.oss_installation.models import CloudConnector
from apps.oss_installation.tasks import sync_users_with_cloud
from apps.slack.tasks import unpopulate_slack_user_identities
from apps.telegram.client import TelegramClient
@ -40,54 +39,50 @@ class LiveSettingViewSet(PublicPrimaryKeyMixin, viewsets.ModelViewSet):
return queryset
def perform_update(self, serializer):
old_value = serializer.instance.value
new_value = serializer.validated_data["value"]
self._update_hook(new_value)
instance = serializer.save()
sync_users = self.request.query_params.get("sync_users", "true") == "true"
if instance.name == "GRAFANA_CLOUD_ONCALL_TOKEN" and sync_users:
sync_users_with_cloud.apply_async()
super().perform_update(serializer)
if new_value != old_value:
self._post_update_hook(old_value)
def perform_destroy(self, instance):
old_value = instance.value
new_value = instance.default_value
self._update_hook(new_value)
super().perform_destroy(instance)
def _update_hook(self, new_value):
if new_value != old_value:
self._post_update_hook(old_value)
def _post_update_hook(self, old_value):
instance = self.get_object()
if instance.name == "TELEGRAM_TOKEN":
try:
old_token = live_settings.TELEGRAM_TOKEN
except ImproperlyConfigured:
old_token = None
self._reset_telegram_integration(old_token=old_value)
register_telegram_webhook.delay()
if old_token != new_value:
self._reset_telegram_integration(new_token=new_value)
if instance.name == "TELEGRAM_WEBHOOK_HOST":
register_telegram_webhook.delay()
for setting_name in ["SLACK_CLIENT_OAUTH_ID", "SLACK_CLIENT_OAUTH_SECRET"]:
if instance.name == setting_name:
if getattr(live_settings, setting_name) != new_value:
organization = self.request.auth.organization
sti = organization.slack_team_identity
if sti is not None:
unpopulate_slack_user_identities.apply_async((sti.pk, True), countdown=0)
if instance.name in ["SLACK_CLIENT_OAUTH_ID", "SLACK_CLIENT_OAUTH_SECRET"]:
organization = self.request.auth.organization
slack_team_identity = organization.slack_team_identity
if slack_team_identity is not None:
unpopulate_slack_user_identities.delay(organization_pk=organization.pk, force=True)
if instance.name == "GRAFANA_CLOUD_ONCALL_TOKEN":
from apps.oss_installation.models import CloudConnector
CloudConnector.remove_sync()
try:
old_token = live_settings.GRAFANA_CLOUD_ONCALL_TOKEN
except ImproperlyConfigured:
old_token = None
sync_users = self.request.query_params.get("sync_users", "true") == "true"
if sync_users:
sync_users_with_cloud.apply_async()
if old_token != new_value:
CloudConnector.remove_sync()
def _reset_telegram_integration(self, new_token):
def _reset_telegram_integration(self, old_token):
# tell Telegram to cancel sending events from old bot
with suppress(ImproperlyConfigured, error.InvalidToken, error.Unauthorized):
old_client = TelegramClient()
with suppress(error.InvalidToken, error.Unauthorized):
old_client = TelegramClient(token=old_token)
old_client.api_client.delete_webhook()
# delete telegram channels for current team
@ -101,6 +96,3 @@ class LiveSettingViewSet(PublicPrimaryKeyMixin, viewsets.ModelViewSet):
for user in users_with_telegram_connector:
user.telegram_connection.delete()
# tell Telegram to send updates to new bot
register_telegram_webhook.delay(token=new_token)

View file

@ -43,10 +43,7 @@ class RouteRegexDebuggerView(APIView):
if len(incidents_matching_regex) < MAX_INCIDENTS_TO_SHOW:
first_alert = ag.alerts.all()[0]
if re.search(regex, json.dumps(first_alert.raw_request_data)):
if ag.cached_render_for_web:
title = ag.cached_render_for_web["render_for_web"]["title"]
else:
title = AlertWebRenderer(first_alert).render()["title"]
title = AlertWebRenderer(first_alert).render()["title"]
incidents_matching_regex.append(
{
"title": title,

View file

@ -58,6 +58,7 @@ class ScheduleView(
*READ_ACTIONS,
"events",
"filter_events",
"next_shifts_per_user",
"notify_empty_oncall_options",
"notify_oncall_shift_freq_options",
"mention_options",
@ -222,6 +223,7 @@ class ScheduleView(
"calendar_type": shift["calendar_type"],
"is_empty": len(shift["users"]) == 0 and not is_gap,
"is_gap": is_gap,
"is_override": shift["calendar_type"] == OnCallSchedule.TYPE_ICAL_OVERRIDES,
"shift": {
"pk": shift["shift_pk"],
},
@ -395,6 +397,25 @@ class ScheduleView(
resolved.sort(key=lambda e: e["start"])
return resolved
@action(detail=True, methods=["get"])
def next_shifts_per_user(self, request, pk):
    """Return next shift for users in schedule.

    Resolves the final schedule over the next 30 days and maps each user
    (by public primary key) to their first not-yet-finished event.
    """
    user_tz, _ = self.get_request_timezone()
    now = timezone.now()
    starting_date = now.date()
    schedule = self.original_get_object()
    # resolve the final schedule, skipping empty shifts and gaps
    shift_events = self._filter_events(schedule, user_tz, starting_date, days=30, with_empty=False, with_gap=False)
    events = self._resolve_schedule(shift_events)
    users = {}
    for e in events:
        # events are sorted by start (see _resolve_schedule), so the first
        # event per user that hasn't ended yet is that user's next shift
        user = e["users"][0]["pk"] if e["users"] else None
        if user is not None and user not in users and e["end"] > now:
            users[user] = e
    result = {"users": users}
    return Response(result, status=status.HTTP_200_OK)
@action(detail=False, methods=["get"])
def type_options(self, request):
# TODO: check if it needed

View file

@ -47,6 +47,7 @@ class LiveSetting(models.Model):
"GRAFANA_CLOUD_ONCALL_TOKEN",
"GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED",
"GRAFANA_CLOUD_NOTIFICATIONS_ENABLED",
"DANGEROUS_WEBHOOKS_ENABLED",
)
DESCRIPTIONS = {
@ -107,10 +108,10 @@ class LiveSetting(models.Model):
"SENDGRID_SECRET_KEY": "It is the secret key to secure receiving inbound emails.",
"SENDGRID_INBOUND_EMAIL_DOMAIN": "Domain to receive emails for inbound emails integration.",
"TELEGRAM_TOKEN": (
"Secret token for Telegram bot, you can get one via " "<a href='https://t.me/BotFather'>BotFather</a>."
"Secret token for Telegram bot, you can get one via <a href='https://t.me/BotFather'>BotFather</a>."
),
"TELEGRAM_WEBHOOK_HOST": (
"Externally available URL for Telegram to make requests. Please restart OnCall backend after update."
"Externally available URL for Telegram to make requests. Must use https and ports 80, 88, 443, 8443."
),
"SEND_ANONYMOUS_USAGE_STATS": (
"Grafana OnCall will send anonymous, but uniquely-identifiable usage analytics to Grafana Labs."
@ -120,6 +121,7 @@ class LiveSetting(models.Model):
"GRAFANA_CLOUD_ONCALL_TOKEN": "Secret token for Grafana Cloud OnCall instance.",
"GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED": "Enable heartbeat integration with Grafana Cloud OnCall.",
"GRAFANA_CLOUD_NOTIFICATIONS_ENABLED": "Enable SMS/call notifications via Grafana Cloud OnCall",
"DANGEROUS_WEBHOOKS_ENABLED": "Enable outgoing webhooks to private networks",
}
SECRET_SETTING_NAMES = (

View file

@ -315,7 +315,6 @@ class UserNotificationPolicyLogRecord(models.Model):
@receiver(post_save, sender=UserNotificationPolicyLogRecord)
def listen_for_usernotificationpolicylogrecord_model_save(sender, instance, created, *args, **kwargs):
instance.alert_group.drop_cached_after_resolve_report_json()
alert_group_pk = instance.alert_group.pk
if instance.type != UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FINISHED:
logger.debug(

View file

@ -1,5 +1,6 @@
import json
import re
from urllib.parse import urlparse
from django.apps import apps
from python_http_client import UnauthorizedError
@ -8,6 +9,8 @@ from telegram import Bot
from twilio.base.exceptions import TwilioException
from twilio.rest import Client
from common.api_helpers.utils import create_engine_url
class LiveSettingProxy:
def __dir__(self):
@ -86,6 +89,12 @@ class LiveSettingValidator:
if not cls._is_email_valid(sendgrid_from_email):
return "Please specify a valid email"
@classmethod
def _check_slack_install_return_redirect_host(cls, slack_install_return_redirect_host):
scheme = urlparse(slack_install_return_redirect_host).scheme
if scheme != "https":
return "Must use https"
@classmethod
def _check_telegram_token(cls, telegram_token):
try:
@ -94,6 +103,15 @@ class LiveSettingValidator:
except Exception as e:
return f"Telegram error: {str(e)}"
@classmethod
def _check_telegram_webhook_host(cls, telegram_webhook_host):
try:
url = create_engine_url("/telegram/", override_base=telegram_webhook_host)
bot = Bot(token=live_settings.TELEGRAM_TOKEN)
bot.set_webhook(url)
except Exception as e:
return f"Telegram error: {str(e)}"
@classmethod
def _check_grafana_cloud_oncall_token(cls, grafana_oncall_token):
from apps.oss_installation.models import CloudConnector

View file

@ -32,7 +32,7 @@ def construct_expected_response_from_incidents(incidents):
"id": incident.public_primary_key,
"integration_id": incident.channel.public_primary_key,
"route_id": incident.channel_filter.public_primary_key,
"alerts_count": incident.alerts_count,
"alerts_count": incident.alerts.count(),
"state": incident.state,
"created_at": created_at,
"resolved_at": resolved_at,

View file

@ -226,19 +226,6 @@ class CustomOnCallShift(models.Model):
for schedule in schedules_to_update:
self.start_drop_ical_and_check_schedule_tasks(schedule)
@property
def event_is_started(self):
return bool(self.rotation_start <= timezone.now())
@property
def event_is_finished(self):
if self.frequency is not None:
is_finished = bool(self.until <= timezone.now()) if self.until else False
else:
is_finished = bool(self.start + self.duration <= timezone.now())
return is_finished
@property
def repr_settings_for_client_side_logging(self) -> str:
"""
@ -273,31 +260,70 @@ class CustomOnCallShift(models.Model):
)
return result
@property
def event_is_started(self):
    # True once the shift's rotation_start moment has passed; says nothing
    # about whether the shift has since finished (see event_is_finished).
    return bool(self.rotation_start <= timezone.now())
@property
def event_is_finished(self):
    # Recurring shifts (frequency set) only end once their `until` bound has
    # passed; a recurring shift with no `until` never finishes.
    # A one-off shift (no frequency) finishes `duration` after its `start`.
    if self.frequency is not None:
        is_finished = bool(self.until <= timezone.now()) if self.until else False
    else:
        is_finished = bool(self.start + self.duration <= timezone.now())
    return is_finished
def convert_to_ical(self, time_zone="UTC"):
result = ""
# use shift time_zone if it exists, otherwise use schedule or default time_zone
time_zone = self.time_zone if self.time_zone is not None else time_zone
# rolling_users shift converts to several ical events
if self.type in (CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, CustomOnCallShift.TYPE_OVERRIDE):
event_ical = None
# generate initial iCal for counting rotation start date
event_ical = self.generate_ical(self.start, user_counter=0)
rotations_created = 0
all_rotation_checked = False
users_queue = self.get_rolling_users()
for counter, users in enumerate(users_queue, start=1):
start = self.get_next_start_date(event_ical)
if not start: # means that rotation ends before next event starts
break
for user_counter, user in enumerate(users, start=1):
event_ical = self.generate_ical(user, start, user_counter, counter, time_zone)
result += event_ical
if not users_queue:
return result
if self.frequency is None:
users_queue = users_queue[:1]
# Get the date of the current rotation
if self.start == self.rotation_start or self.frequency is None:
start = self.start
else:
start = self.get_rotation_date(event_ical)
while not all_rotation_checked:
for counter, users in enumerate(users_queue, start=1):
if not start: # means that rotation ends before next event starts
all_rotation_checked = True
break
elif start >= self.rotation_start: # event has already started, generate iCal for each user
for user_counter, user in enumerate(users, start=1):
event_ical = self.generate_ical(start, user_counter, user, counter, time_zone)
result += event_ical
rotations_created += 1
else: # generate default iCal to calculate the date for the next rotation
event_ical = self.generate_ical(start, user_counter=0)
if rotations_created == len(users_queue): # means that we generated iCal for every user group
all_rotation_checked = True
break
# Use the flag 'get_next_date' to get the date of the next rotation
start = self.get_rotation_date(event_ical, get_next_date=True)
else:
for user_counter, user in enumerate(self.users.all(), start=1):
result += self.generate_ical(user, self.start, user_counter, time_zone=time_zone)
result += self.generate_ical(self.start, user_counter, user, time_zone=time_zone)
return result
def generate_ical(self, user, start, user_counter, counter=1, time_zone="UTC"):
# create event for each user in a list because we can't parse multiple users from ical summary
def generate_ical(self, start, user_counter, user=None, counter=1, time_zone="UTC"):
event = Event()
event["uid"] = f"oncall-{self.uuid}-PK{self.public_primary_key}-U{user_counter}-E{counter}-S{self.source}"
event.add("summary", self.get_summary_with_user_for_ical(user))
if user:
event.add("summary", self.get_summary_with_user_for_ical(user))
event.add("dtstart", self.convert_dt_to_schedule_timezone(start, time_zone))
event.add("dtend", self.convert_dt_to_schedule_timezone(start + self.duration, time_zone))
event.add("dtstamp", timezone.now())
@ -317,39 +343,61 @@ class CustomOnCallShift(models.Model):
summary += f"{user.username} "
return summary
def get_next_start_date(self, event_ical):
def get_rotation_date(self, event_ical, get_next_date=False):
"""Get date of the next event (for rolling_users shifts)"""
if event_ical is None:
return self.start
ONE_DAY = 1
ONE_HOUR = 1
current_event = Event.from_ical(event_ical)
# take shift interval, not event interval. For rolling_users shift it is not the same.
current_event["rrule"]["INTERVAL"] = self.interval or 1
interval = self.interval or 1
current_event["rrule"]["INTERVAL"] = interval
current_event_start = current_event["DTSTART"].dt
next_event_start = current_event_start
ONE_DAY = 1
# Calculate the minimum start date for the next event based on rotation frequency. We don't need to do this
# for the first rotation, because in this case the min start date will be the same as the current event date.
if get_next_date:
if self.frequency == CustomOnCallShift.FREQUENCY_HOURLY:
next_event_start = current_event_start + timezone.timedelta(hours=ONE_HOUR)
elif self.frequency == CustomOnCallShift.FREQUENCY_DAILY:
next_event_start = current_event_start + timezone.timedelta(days=ONE_DAY)
elif self.frequency == CustomOnCallShift.FREQUENCY_WEEKLY:
DAYS_IN_A_WEEK = 7
# count days before the next week starts
days_for_next_event = DAYS_IN_A_WEEK - current_event_start.weekday() + self.week_start
if days_for_next_event > DAYS_IN_A_WEEK:
days_for_next_event = days_for_next_event % DAYS_IN_A_WEEK
# count next event start date with respect to event interval
next_event_start = current_event_start + timezone.timedelta(
days=days_for_next_event + DAYS_IN_A_WEEK * (interval - 1)
)
elif self.frequency == CustomOnCallShift.FREQUENCY_MONTHLY:
DAYS_IN_A_MONTH = monthrange(current_event_start.year, current_event_start.month)[1]
# count days before the next month starts
days_for_next_event = DAYS_IN_A_MONTH - current_event_start.day + ONE_DAY
# count next event start date with respect to event interval
for i in range(1, interval):
next_month_days = monthrange(current_event_start.year, current_event_start.month + i)[1]
days_for_next_event += next_month_days
next_event_start = current_event_start + timezone.timedelta(days=days_for_next_event)
if self.frequency == CustomOnCallShift.FREQUENCY_HOURLY:
next_event_start = current_event_start + timezone.timedelta(hours=1)
elif self.frequency == CustomOnCallShift.FREQUENCY_DAILY:
# test daily with byday
next_event_start = current_event_start + timezone.timedelta(days=ONE_DAY)
elif self.frequency == CustomOnCallShift.FREQUENCY_WEEKLY:
end_date = None
# get the period for calculating the current rotation end date for long events with frequency weekly and monthly
if self.frequency == CustomOnCallShift.FREQUENCY_WEEKLY:
DAYS_IN_A_WEEK = 7
days_for_next_event = DAYS_IN_A_WEEK - current_event_start.weekday() + self.week_start
if days_for_next_event > DAYS_IN_A_WEEK:
days_for_next_event = days_for_next_event % DAYS_IN_A_WEEK
next_event_start = current_event_start + timezone.timedelta(days=days_for_next_event)
days_diff = 0
# get the last day of the week with respect to the week_start
if next_event_start.weekday() != self.week_start:
days_diff = DAYS_IN_A_WEEK + next_event_start.weekday() - self.week_start
days_diff %= DAYS_IN_A_WEEK
end_date = next_event_start + timezone.timedelta(days=DAYS_IN_A_WEEK - days_diff - ONE_DAY)
elif self.frequency == CustomOnCallShift.FREQUENCY_MONTHLY:
DAYS_IN_A_MONTH = monthrange(self.start.year, self.start.month)[1]
# count days before the next month starts
days_for_next_event = DAYS_IN_A_MONTH - current_event_start.day + ONE_DAY
if days_for_next_event > DAYS_IN_A_MONTH:
days_for_next_event = days_for_next_event % DAYS_IN_A_MONTH
next_event_start = current_event_start + timezone.timedelta(days=days_for_next_event)
# get the last day of the month
current_day_number = next_event_start.day
number_of_days = monthrange(next_event_start.year, next_event_start.month)[1]
days_diff = number_of_days - current_day_number
end_date = next_event_start + timezone.timedelta(days=days_diff)
# check if rotation ends before next event starts
if self.until and next_event_start > self.until:
return
next_event = None
# repetitions generate the next event shift according with the recurrence rules
repetitions = UnfoldableCalendar(current_event).RepeatedEvent(
@ -357,10 +405,22 @@ class CustomOnCallShift(models.Model):
)
ical_iter = repetitions.__iter__()
for event in ical_iter:
if event.start.date() >= next_event_start.date():
next_event = event
break
next_event_dt = next_event.start if next_event is not None else None
if end_date: # end_date exists for long events with frequency weekly and monthly
if end_date >= event.start >= next_event_start:
if event.start >= self.rotation_start:
next_event = event
break
else:
break
else:
if event.start >= next_event_start:
next_event = event
break
next_event_dt = next_event.start if next_event is not None else next_event_start
if self.until and next_event_dt > self.until:
return
return next_event_dt
@cached_property

View file

@ -1,3 +1,5 @@
from calendar import monthrange
import pytest
from django.utils import timezone
@ -38,7 +40,7 @@ def test_get_on_call_users_from_web_schedule_override(make_organization_and_user
organization, user = make_organization_and_user()
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
date = timezone.now().replace(tzinfo=None, microsecond=0)
date = timezone.now().replace(microsecond=0)
data = {
"start": date,
@ -105,7 +107,7 @@ def test_get_on_call_users_from_web_schedule_recurrent_event(
organization, user = make_organization_and_user()
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
date = timezone.now().replace(tzinfo=None, microsecond=0)
date = timezone.now().replace(microsecond=0)
data = {
"priority_level": 1,
@ -148,7 +150,7 @@ def test_get_on_call_users_from_rolling_users_event(
user_2 = make_user_for_organization(organization)
schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
now = timezone.now().replace(tzinfo=None, microsecond=0)
now = timezone.now().replace(microsecond=0)
data = {
"priority_level": 1,
@ -190,6 +192,581 @@ def test_get_on_call_users_from_rolling_users_event(
assert len(users_on_call) == 0
@pytest.mark.django_db
def test_rolling_users_event_with_interval_hourly(
    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Hourly rolling-users shift with interval=2 alternates the two users every 2 hours.

    The rotation starts one hour after the shift start, so the very first event
    (which would have been user_1's) yields nobody on call.
    """
    organization, user_1 = make_organization_and_user()
    user_2 = make_user_for_organization(organization)
    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)

    now = timezone.now().replace(microsecond=0)

    shift_params = {
        "priority_level": 1,
        "start": now,
        "rotation_start": now + timezone.timedelta(hours=1),
        "duration": timezone.timedelta(seconds=600),
        "frequency": CustomOnCallShift.FREQUENCY_HOURLY,
        "interval": 2,
        "schedule": schedule,
    }
    on_call_shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **shift_params
    )
    on_call_shift.add_rolling_users([[user_1], [user_2]])

    base = now + timezone.timedelta(minutes=5)

    def check_hours(offsets, who=None):
        # Assert exactly `who` (or nobody when None) is on call at each hour offset.
        for hours in offsets:
            on_call = list_users_to_notify_from_ical(schedule, base + timezone.timedelta(hours=hours))
            if who is None:
                assert len(on_call) == 0
            else:
                assert len(on_call) == 1
                assert who in on_call

    check_hours([4], who=user_1)
    check_hours([2, 6], who=user_2)
    check_hours([0, 1, 3, 5])
@pytest.mark.django_db
def test_rolling_users_event_with_interval_daily(
    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Daily rolling-users shift with interval=2: each user is on call for one
    day out of every four, with nobody on call on the days in between."""
    organization, user_1 = make_organization_and_user()
    user_2 = make_user_for_organization(organization)
    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)

    now = timezone.now().replace(microsecond=0)

    shift_params = {
        "priority_level": 1,
        "start": now,
        "rotation_start": now,
        "duration": timezone.timedelta(seconds=10800),
        "frequency": CustomOnCallShift.FREQUENCY_DAILY,
        "interval": 2,
        "schedule": schedule,
    }
    on_call_shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **shift_params
    )
    on_call_shift.add_rolling_users([[user_1], [user_2]])

    probe = now + timezone.timedelta(minutes=5)

    # (day offset, expected on-call user; None means nobody on call)
    expectations = [
        (0, user_1),
        (1, None),
        (2, user_2),
        (3, None),
        (4, user_1),
        (5, None),
        (6, user_2),
    ]
    for offset_days, expected_user in expectations:
        on_call = list_users_to_notify_from_ical(schedule, probe + timezone.timedelta(days=offset_days))
        if expected_user is None:
            assert len(on_call) == 0
        else:
            assert len(on_call) == 1
            assert expected_user in on_call
@pytest.mark.django_db
def test_rolling_users_event_with_interval_weekly(
    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Weekly rolling-users shift with interval=2: each of the two users holds
    the shift for one week out of every four.

    The rotation starts an hour after the shift start, so the first week's
    event (user_1's) yields nobody on call.
    """
    organization, user_1 = make_organization_and_user()
    user_2 = make_user_for_organization(organization)
    schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)

    now = timezone.now().replace(microsecond=0)

    shift_params = {
        "priority_level": 1,
        "start": now,
        "rotation_start": now + timezone.timedelta(hours=1),
        "duration": timezone.timedelta(seconds=10800),
        "frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
        "interval": 2,
        "week_start": now.weekday(),
        "schedule": schedule,
    }
    on_call_shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **shift_params
    )
    on_call_shift.add_rolling_users([[user_1], [user_2]])
    # Calendar schedules attach shifts explicitly.
    schedule.custom_on_call_shifts.add(on_call_shift)

    base = now + timezone.timedelta(minutes=5)

    def check_days(offsets, who=None):
        # Assert exactly `who` (or nobody when None) is on call at each day offset.
        for days in offsets:
            on_call = list_users_to_notify_from_ical(schedule, base + timezone.timedelta(days=days))
            if who is None:
                assert len(on_call) == 0
            else:
                assert len(on_call) == 1
                assert who in on_call

    check_days([28], who=user_1)
    check_days([14, 42], who=user_2)
    check_days([0, 7, 21, 35])
@pytest.mark.django_db
def test_rolling_users_event_with_interval_monthly(
    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Monthly rolling-users shift with interval=2: each of the two users is on
    call one month out of every four.

    Bugfix: month lengths are now computed with year rollover. The previous
    ``monthrange(year, month + k)`` calls raised ``IllegalMonthError`` whenever
    the test ran in October–December (month + k > 12).
    """
    organization, user_1 = make_organization_and_user()
    user_2 = make_user_for_organization(organization)
    schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)

    start_date = timezone.now().replace(day=1, microsecond=0)

    def month_length(months_ahead):
        # Days in the month `months_ahead` months after start_date's month,
        # rolling over into the next year when needed.
        month_index = start_date.month - 1 + months_ahead
        return monthrange(start_date.year + month_index // 12, month_index % 12 + 1)[1]

    # Cumulative day offsets from start_date to the 1st of the next four months.
    days_for_next_month_1 = month_length(0)
    days_for_next_month_2 = month_length(1) + days_for_next_month_1
    days_for_next_month_3 = month_length(2) + days_for_next_month_2
    days_for_next_month_4 = month_length(3) + days_for_next_month_3

    data = {
        "priority_level": 1,
        "start": start_date,
        "rotation_start": start_date + timezone.timedelta(hours=1),
        "duration": timezone.timedelta(seconds=10800),
        "frequency": CustomOnCallShift.FREQUENCY_MONTHLY,
        "interval": 2,
        "week_start": start_date.weekday(),
        "schedule": schedule,
    }
    rolling_users = [[user_1], [user_2]]
    on_call_shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
    )
    on_call_shift.add_rolling_users(rolling_users)
    schedule.custom_on_call_shifts.add(on_call_shift)

    date = start_date + timezone.timedelta(minutes=5)

    # The first event (user_1's, at start_date) precedes rotation_start, so
    # nobody is on call then; user_2 takes the rotation two months in.
    user_1_on_call_dates = [date + timezone.timedelta(days=days_for_next_month_4)]
    user_2_on_call_dates = [date + timezone.timedelta(days=days_for_next_month_2)]
    nobody_on_call_dates = [
        date,
        date + timezone.timedelta(days=days_for_next_month_1),
        date + timezone.timedelta(days=days_for_next_month_3),
    ]

    for dt in user_1_on_call_dates:
        users_on_call = list_users_to_notify_from_ical(schedule, dt)
        assert len(users_on_call) == 1
        assert user_1 in users_on_call

    for dt in user_2_on_call_dates:
        users_on_call = list_users_to_notify_from_ical(schedule, dt)
        assert len(users_on_call) == 1
        assert user_2 in users_on_call

    for dt in nobody_on_call_dates:
        users_on_call = list_users_to_notify_from_ical(schedule, dt)
        assert len(users_on_call) == 0
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_hourly(
    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Hourly rolling-users shift whose rotation_start is 2 hours after start.

    Events before rotation_start yield nobody on call, so the rotation
    effectively begins with user_3; events after `until` also yield nobody.
    """
    organization, user_1 = make_organization_and_user()
    user_2 = make_user_for_organization(organization)
    user_3 = make_user_for_organization(organization)
    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)

    now = timezone.now().replace(microsecond=0)

    shift_params = {
        "priority_level": 1,
        "start": now,
        "rotation_start": now + timezone.timedelta(hours=2),
        "duration": timezone.timedelta(seconds=1800),
        "frequency": CustomOnCallShift.FREQUENCY_HOURLY,
        "schedule": schedule,
        "until": now + timezone.timedelta(hours=6, minutes=59),
    }
    on_call_shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **shift_params
    )
    on_call_shift.add_rolling_users([[user_1], [user_2], [user_3]])

    base = now + timezone.timedelta(minutes=5)

    def check_hours(offsets, who=None):
        # Assert exactly `who` (or nobody when None) is on call at each hour offset.
        for hours in offsets:
            on_call = list_users_to_notify_from_ical(schedule, base + timezone.timedelta(hours=hours))
            if who is None:
                assert len(on_call) == 0
            else:
                assert len(on_call) == 1
                assert who in on_call

    # user_1's and user_2's first events precede rotation_start, so user_3 goes first.
    check_hours([3, 6], who=user_1)
    check_hours([4], who=user_2)
    check_hours([2, 5], who=user_3)
    # 0 and 1: before rotation start; 7: past `until`.
    check_hours([0, 1, 7])
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_daily(
    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Daily rolling-users shift whose rotation_start is a day (plus an hour)
    after start: the rotation effectively begins with user_3, and nothing is
    scheduled after `until`."""
    organization, user_1 = make_organization_and_user()
    user_2 = make_user_for_organization(organization)
    user_3 = make_user_for_organization(organization)
    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)

    now = timezone.now().replace(microsecond=0)

    shift_params = {
        "priority_level": 1,
        "start": now,
        "rotation_start": now + timezone.timedelta(days=1, hours=1),
        "duration": timezone.timedelta(seconds=1800),
        "frequency": CustomOnCallShift.FREQUENCY_DAILY,
        "schedule": schedule,
        "until": now + timezone.timedelta(days=6, minutes=1),
    }
    on_call_shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **shift_params
    )
    on_call_shift.add_rolling_users([[user_1], [user_2], [user_3]])

    base = now + timezone.timedelta(minutes=5)

    def check_days(offsets, who=None):
        # Assert exactly `who` (or nobody when None) is on call at each day offset.
        for days in offsets:
            on_call = list_users_to_notify_from_ical(schedule, base + timezone.timedelta(days=days))
            if who is None:
                assert len(on_call) == 0
            else:
                assert len(on_call) == 1
                assert who in on_call

    # user_1's and user_2's first events precede rotation_start, so user_3 goes first.
    check_days([3, 6], who=user_1)
    check_days([4], who=user_2)
    check_days([2, 5], who=user_3)
    # 0 and 1: before rotation start; 7: past `until`.
    check_days([0, 1, 7])
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_weekly(
    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Weekly rolling-users shift whose rotation_start is a week (plus an hour)
    after start: the rotation effectively begins with user_3, and nothing is
    scheduled after `until`."""
    organization, user_1 = make_organization_and_user()
    user_2 = make_user_for_organization(organization)
    user_3 = make_user_for_organization(organization)
    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)

    now = timezone.now().replace(microsecond=0)

    shift_params = {
        "priority_level": 1,
        "start": now,
        "week_start": now.weekday(),
        "rotation_start": now + timezone.timedelta(days=7, hours=1),
        "duration": timezone.timedelta(seconds=1800),
        "frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
        "schedule": schedule,
        "until": now + timezone.timedelta(days=42, minutes=1),
    }
    on_call_shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **shift_params
    )
    on_call_shift.add_rolling_users([[user_1], [user_2], [user_3]])

    base = now + timezone.timedelta(minutes=5)

    def check_days(offsets, who=None):
        # Assert exactly `who` (or nobody when None) is on call at each day offset.
        for days in offsets:
            on_call = list_users_to_notify_from_ical(schedule, base + timezone.timedelta(days=days))
            if who is None:
                assert len(on_call) == 0
            else:
                assert len(on_call) == 1
                assert who in on_call

    # user_1's and user_2's first events precede rotation_start, so user_3 goes first.
    check_days([21, 42], who=user_1)
    check_days([28], who=user_2)
    check_days([14, 35], who=user_3)
    # 0 and 7: before rotation start; 43: past `until`.
    check_days([0, 7, 43])
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_weekly_by_day(
    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Weekly rolling-users shift restricted to two weekdays via by_day, with
    rotation_start 8 days after start: events before rotation_start and after
    `until` yield nobody on call."""
    organization, user_1 = make_organization_and_user()
    user_2 = make_user_for_organization(organization)
    user_3 = make_user_for_organization(organization)
    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)

    now = timezone.now().replace(microsecond=0)
    today_weekday = now.weekday()
    # The shift recurs only on the weekdays falling 1 and 3 days after "today".
    selected_weekdays = [(today_weekday + 1) % 7, (today_weekday + 3) % 7]
    by_day = [CustomOnCallShift.ICAL_WEEKDAY_MAP[weekday] for weekday in selected_weekdays]

    shift_params = {
        "priority_level": 1,
        "start": now,
        "week_start": today_weekday,
        "rotation_start": now + timezone.timedelta(days=8, hours=1),
        "duration": timezone.timedelta(seconds=1800),
        "frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
        "schedule": schedule,
        "until": now + timezone.timedelta(days=23, minutes=1),
        "by_day": by_day,
    }
    on_call_shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **shift_params
    )
    on_call_shift.add_rolling_users([[user_1], [user_2], [user_3]])

    base = now + timezone.timedelta(minutes=5)

    def check_days(offsets, who=None):
        # Assert exactly `who` (or nobody when None) is on call at each day offset.
        for days in offsets:
            on_call = list_users_to_notify_from_ical(schedule, base + timezone.timedelta(days=days))
            if who is None:
                assert len(on_call) == 0
            else:
                assert len(on_call) == 1
                assert who in on_call

    # week 1: +1 and +3 days precede rotation_start (would be user_1) -> nobody
    # week 2: +8 precedes rotation_start; +10 is on call              -> user_2
    # week 3: +15 and +17 are on call                                 -> user_3
    # week 4: +22 is on call; +24 is past `until`                     -> user_1
    check_days([22], who=user_1)
    check_days([10], who=user_2)
    check_days([15, 17], who=user_3)
    # 0, 1, 3, 8: before rotation start; 9: weekday not in by_day; 24: past `until`.
    check_days([0, 1, 3, 8, 9, 24])
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_monthly(
    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Monthly rolling-users shift whose rotation_start falls at the end of the
    first month: user_1's event precedes rotation_start, so the rotation
    effectively begins with user_2.

    Bugfix: the next month's length is computed with year rollover; the
    previous ``monthrange(now.year, now.month + 1)`` raised
    ``IllegalMonthError`` when the test ran in December.
    """
    organization, user_1 = make_organization_and_user()
    user_2 = make_user_for_organization(organization)
    user_3 = make_user_for_organization(organization)
    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)

    now = timezone.now().replace(day=1, microsecond=0)
    days_in_curr_month = monthrange(now.year, now.month)[1]
    # Roll over to January of the next year instead of passing month 13.
    if now.month == 12:
        days_in_next_month = monthrange(now.year + 1, 1)[1]
    else:
        days_in_next_month = monthrange(now.year, now.month + 1)[1]

    data = {
        "priority_level": 1,
        "start": now,
        "week_start": now.weekday(),
        "rotation_start": now + timezone.timedelta(days=days_in_curr_month - 1, hours=1),
        "duration": timezone.timedelta(seconds=1800),
        "frequency": CustomOnCallShift.FREQUENCY_MONTHLY,
        "schedule": schedule,
        "until": now + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 10, minutes=1),
    }
    rolling_users = [[user_1], [user_2], [user_3]]
    on_call_shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
    )
    on_call_shift.add_rolling_users(rolling_users)

    date = now + timezone.timedelta(minutes=5)

    # rotation starts from user_2, because user_1 started earlier than rotation start date
    user_2_on_call_dates = [date + timezone.timedelta(days=days_in_curr_month)]
    user_3_on_call_dates = [date + timezone.timedelta(days=days_in_curr_month + days_in_next_month)]
    nobody_on_call_dates = [
        date,  # less than rotation start
        date + timezone.timedelta(days=days_in_curr_month - 1),  # less than rotation start
        date + timezone.timedelta(days=days_in_curr_month + 1),  # higher than event end
        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 2),  # higher than event end
        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 11),  # higher than until
    ]

    for dt in user_2_on_call_dates:
        users_on_call = list_users_to_notify_from_ical(schedule, dt)
        assert len(users_on_call) == 1
        assert user_2 in users_on_call

    for dt in user_3_on_call_dates:
        users_on_call = list_users_to_notify_from_ical(schedule, dt)
        assert len(users_on_call) == 1
        assert user_3 in users_on_call

    for dt in nobody_on_call_dates:
        users_on_call = list_users_to_notify_from_ical(schedule, dt)
        assert len(users_on_call) == 0
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_monthly_by_monthday(
    make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Monthly rolling-users shift limited to monthdays 1-4.

    The shift's start precedes its rotation_start, so the first rotation
    slot (user_1) is skipped entirely: user_2 covers next month's
    monthdays 1-4, user_3 the month after, and nobody is on call outside
    those windows or past ``until``.
    """
    organization, user_1 = make_organization_and_user()
    user_2 = make_user_for_organization(organization)
    user_3 = make_user_for_organization(organization)

    schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
    start_date = timezone.now().replace(day=1, microsecond=0)
    days_in_curr_month = monthrange(start_date.year, start_date.month)[1]
    # Roll the year over when the test runs in December:
    # monthrange(year, 13) raises calendar.IllegalMonthError.
    if start_date.month == 12:
        days_in_next_month = monthrange(start_date.year + 1, 1)[1]
    else:
        days_in_next_month = monthrange(start_date.year, start_date.month + 1)[1]
    data = {
        "priority_level": 1,
        "start": start_date,
        "week_start": start_date.weekday(),
        "rotation_start": start_date + timezone.timedelta(days=days_in_curr_month - 1, hours=1),
        "duration": timezone.timedelta(seconds=1800),
        "frequency": CustomOnCallShift.FREQUENCY_MONTHLY,
        "schedule": schedule,
        "until": start_date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 10, minutes=1),
        "by_monthday": [i for i in range(1, 5)],
    }
    rolling_users = [[user_1], [user_2], [user_3]]
    on_call_shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
    )
    on_call_shift.add_rolling_users(rolling_users)
    date = start_date + timezone.timedelta(minutes=5)

    # rotation starts from user_2, because user_1 started earlier than rotation start date
    user_2_on_call_dates = [
        date + timezone.timedelta(days=days_in_curr_month),
        date + timezone.timedelta(days=days_in_curr_month + 1),
        date + timezone.timedelta(days=days_in_curr_month + 2),
        date + timezone.timedelta(days=days_in_curr_month + 3),
    ]
    user_3_on_call_dates = [
        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month),
        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 1),
        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 2),
        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 3),
    ]
    nobody_on_call_dates = [
        date,  # less than rotation start
        date + timezone.timedelta(days=3),  # less than rotation start
        date + timezone.timedelta(days=days_in_curr_month + 4),  # out of by_monthday range
        date + timezone.timedelta(days=days_in_curr_month + 6),  # out of by_monthday range
        date + timezone.timedelta(days=days_in_curr_month + 10),  # out of by_monthday range
        date + timezone.timedelta(days=days_in_curr_month + days_in_next_month + 11),  # higher than until
    ]

    for dt in user_2_on_call_dates:
        users_on_call = list_users_to_notify_from_ical(schedule, dt)
        assert len(users_on_call) == 1
        assert user_2 in users_on_call

    for dt in user_3_on_call_dates:
        users_on_call = list_users_to_notify_from_ical(schedule, dt)
        assert len(users_on_call) == 1
        assert user_3 in users_on_call

    for dt in nobody_on_call_dates:
        users_on_call = list_users_to_notify_from_ical(schedule, dt)
        assert len(users_on_call) == 0
@pytest.mark.django_db
def test_get_oncall_users_for_empty_schedule(
make_organization,
@ -299,3 +876,39 @@ def test_shift_convert_to_ical(make_organization_and_user, make_on_call_shift):
ical_rrule_until = on_call_shift.until.strftime("%Y%m%dT%H%M%S")
expected_rrule = f"RRULE:FREQ=HOURLY;UNTIL={ical_rrule_until}Z;INTERVAL=1;WKST=SU"
assert expected_rrule in ical_data
@pytest.mark.django_db
def test_rolling_users_shift_convert_to_ical(
    make_organization_and_user,
    make_user_for_organization,
    make_on_call_shift,
):
    """An hourly rolling-users shift multiplies the RRULE interval by the
    number of rolling user groups (2 groups * interval 2 -> INTERVAL=4)."""
    organization, first_user = make_organization_and_user()
    second_user = make_user_for_organization(organization)

    now = timezone.now().replace(microsecond=0)
    shift_kwargs = {
        "priority_level": 1,
        "start": now,
        "rotation_start": now,
        "duration": timezone.timedelta(seconds=10800),
        "frequency": CustomOnCallShift.FREQUENCY_HOURLY,
        "interval": 2,
        "until": now + timezone.timedelta(days=30),
    }
    shift = make_on_call_shift(
        organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **shift_kwargs
    )
    user_groups = [[first_user], [second_user]]
    shift.add_rolling_users(user_groups)

    ical_data = shift.convert_to_ical()
    until_stamp = shift.until.strftime("%Y%m%dT%H%M%S")

    # Effective recurrence interval accounts for every rotation slot.
    assert shift.event_interval == len(user_groups) * shift_kwargs["interval"]
    assert f"RRULE:FREQ=HOURLY;UNTIL={until_stamp}Z;INTERVAL=4;WKST=SU" in ical_data

View file

@ -247,10 +247,6 @@ class UpdateAppearanceStep(scenario_step.ScenarioStep):
if new_value is None and old_value is not None:
setattr(alert_receive_channel, attr_name, None)
alert_receive_channel.save()
# Drop caches for current alert group
if notification_channel == "web":
setattr(alert_group, f"cached_render_for_web_{templatizable_attr}", None)
alert_group.save()
elif new_value is not None:
default_values = getattr(
AlertReceiveChannel,
@ -265,18 +261,10 @@ class UpdateAppearanceStep(scenario_step.ScenarioStep):
jinja_template_env.from_string(new_value)
setattr(alert_receive_channel, attr_name, new_value)
alert_receive_channel.save()
# Drop caches for current alert group
if notification_channel == "web":
setattr(alert_group, f"cached_render_for_web_{templatizable_attr}", None)
alert_group.save()
elif default_value is not None and new_value.strip() == default_value.strip():
new_value = None
setattr(alert_receive_channel, attr_name, new_value)
alert_receive_channel.save()
# Drop caches for current alert group
if notification_channel == "web":
setattr(alert_group, f"cached_render_for_web_{templatizable_attr}", None)
alert_group.save()
except TemplateSyntaxError:
return Response(
{"response_action": "errors", "errors": {attr_name: "Template has incorrect format"}},

View file

@ -674,7 +674,6 @@ class AddRemoveThreadMessageStep(UpdateResolutionNoteStep, scenario_step.Scenari
add_to_resolution_note = True if value["msg_value"].startswith("add") else False
slack_thread_message = None
resolution_note = None
drop_ag_cache = False
alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
@ -695,7 +694,6 @@ class AddRemoveThreadMessageStep(UpdateResolutionNoteStep, scenario_step.Scenari
else:
resolution_note.recreate()
self.add_resolution_note_reaction(slack_thread_message)
drop_ag_cache = True
elif not add_to_resolution_note:
# Check if resolution_note can be removed
if (
@ -720,13 +718,9 @@ class AddRemoveThreadMessageStep(UpdateResolutionNoteStep, scenario_step.Scenari
slack_thread_message.added_to_resolution_note = False
slack_thread_message.save(update_fields=["added_to_resolution_note"])
self.remove_resolution_note_reaction(slack_thread_message)
drop_ag_cache = True
self.update_alert_group_resolution_note_button(
alert_group,
)
if drop_ag_cache:
alert_group.drop_cached_after_resolve_report_json()
alert_group.schedule_cache_for_web()
resolution_note_data = json.loads(payload["actions"][0]["value"])
resolution_note_data["resolution_note_window_action"] = "edit_update"
ResolutionNoteModalStep(slack_team_identity, self.organization, self.user).process_scenario(

View file

@ -39,8 +39,6 @@ class LiveSettingDjangoStrategy(DjangoStrategy):
"""
if live_settings.SLACK_INSTALL_RETURN_REDIRECT_HOST is not None and path is not None:
return create_engine_url(path, override_base=live_settings.SLACK_INSTALL_RETURN_REDIRECT_HOST)
if settings.SLACK_INSTALL_RETURN_REDIRECT_HOST is not None and path is not None:
return create_engine_url(path, override_base=settings.SLACK_INSTALL_RETURN_REDIRECT_HOST)
if self.request:
return self.request.build_absolute_uri(path)
else:

View file

@ -36,9 +36,6 @@ class TelegramClient:
def register_webhook(self, webhook_url: Optional[str] = None) -> None:
webhook_url = webhook_url or create_engine_url("/telegram/", override_base=live_settings.TELEGRAM_WEBHOOK_HOST)
if webhook_url is None:
webhook_url = live_settings.TELEGRAM_WEBHOOK_URL
webhook_info = self.api_client.get_webhook_info()
if webhook_info.url == webhook_url:
return

View file

@ -1,7 +1,6 @@
import logging
from functools import wraps
from django.core.exceptions import ImproperlyConfigured
from telegram import error
from apps.telegram.client import TelegramClient
@ -14,7 +13,7 @@ def handle_missing_token(f):
def decorated(*args, **kwargs):
try:
TelegramClient()
except (ImproperlyConfigured, error.InvalidToken) as e:
except error.InvalidToken as e:
logger.warning(
"Tried to initialize a Telegram client, but TELEGRAM_TOKEN live setting is invalid or missing. "
f"Exception: {e}"

View file

@ -1,4 +1,3 @@
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from telegram import error
@ -67,7 +66,7 @@ class TelegramToUserConnector(models.Model):
def send_full_incident(self, alert_group: AlertGroup, notification_policy: UserNotificationPolicy) -> None:
try:
telegram_client = TelegramClient()
except (ImproperlyConfigured, error.InvalidToken):
except error.InvalidToken:
TelegramToUserConnector.create_telegram_notification_error(
alert_group,
self.user,
@ -125,7 +124,7 @@ class TelegramToUserConnector(models.Model):
def send_link_to_channel_message(self, alert_group: AlertGroup, notification_policy: UserNotificationPolicy):
try:
telegram_client = TelegramClient()
except (ImproperlyConfigured, error.InvalidToken):
except error.InvalidToken:
TelegramToUserConnector.create_telegram_notification_error(
alert_group,
self.user,

View file

@ -33,7 +33,7 @@ def register_telegram_webhook(token=None):
try:
telegram_client.register_webhook()
except (error.InvalidToken, error.Unauthorized) as e:
except (error.InvalidToken, error.Unauthorized, error.BadRequest) as e:
logger.warning(f"Tried to register Telegram webhook using token: {telegram_client.token}, got error: {e}")

View file

@ -251,7 +251,7 @@ class PhoneCall(models.Model):
if phone_calls_left < 3:
message_body += " {} phone calls left. Contact your admin.".format(phone_calls_left)
twilio_call = twilio_client.make_call(message_body, user.verified_phone_number)
twilio_call = twilio_client.make_call(message_body, user.verified_phone_number, grafana_cloud=grafana_cloud)
if twilio_call.status and twilio_call.sid:
phone_call.status = TwilioCallStatuses.DETERMINANT.get(twilio_call.status, None)
phone_call.sid = twilio_call.sid

View file

@ -1,3 +1,4 @@
import urllib
from unittest import mock
import pytest
@ -11,6 +12,13 @@ from rest_framework.test import APIClient
from apps.base.models import UserNotificationPolicy
from apps.twilioapp.constants import TwilioCallStatuses
from apps.twilioapp.models import PhoneCall
from apps.twilioapp.utils import get_gather_message
class FakeTwilioCall:
    """Stub for the object Twilio's ``calls.create()`` returns.

    Exposes only the two attributes the code under test reads back.
    """

    def __init__(self):
        self.status = TwilioCallStatuses.COMPLETED
        self.sid = "123"
@pytest.fixture
@ -268,3 +276,58 @@ def test_wrong_pressed_digit(mock_has_permission, mock_get_gather_url, phone_cal
assert response.status_code == 200
assert "Wrong digit" in content
@mock.patch("apps.twilioapp.twilio_client.Client")
@pytest.mark.django_db
def test_make_cloud_phone_call_not_gathering_digit(mock_twilio_client, make_organization, make_user):
    """Grafana-cloud calls must omit the digit-gathering prompt from the TwiML URL."""
    organization = make_organization()
    user = make_user(organization=organization, _verified_phone_number="9999555")

    fake_create = mock_twilio_client.return_value.calls.create
    fake_create.return_value = FakeTwilioCall()

    PhoneCall.make_grafana_cloud_call(user, "the message")

    encoded_gather = urllib.parse.quote(get_gather_message())
    assert encoded_gather not in fake_create.call_args.kwargs["url"]
@mock.patch("apps.twilioapp.twilio_client.Client")
@pytest.mark.django_db
def test_make_phone_call_gathering_digit(
    mock_twilio_client,
    make_organization,
    make_user,
    make_user_notification_policy,
    make_alert_receive_channel,
    make_alert_group,
    make_alert,
):
    """Regular notification calls must include the digit-gathering prompt in the TwiML URL."""
    organization = make_organization()
    user = make_user(organization=organization, _verified_phone_number="9999555")
    alert_receive_channel = make_alert_receive_channel(organization)
    alert_group = make_alert_group(alert_receive_channel)
    notification_policy = make_user_notification_policy(
        user=user,
        step=UserNotificationPolicy.Step.NOTIFY,
        notify_by=UserNotificationPolicy.NotificationChannel.PHONE_CALL,
    )

    raw_request_data = {
        "status": "firing",
        "labels": {
            "alertname": "TestAlert",
            "region": "eu-1",
        },
        "annotations": {},
        "startsAt": "2018-12-25T15:47:47.377363608Z",
        "endsAt": "0001-01-01T00:00:00Z",
        "generatorURL": "",
    }
    make_alert(alert_group, raw_request_data=raw_request_data)

    fake_create = mock_twilio_client.return_value.calls.create
    fake_create.return_value = FakeTwilioCall()

    PhoneCall.make_call(user, alert_group, notification_policy)

    encoded_gather = urllib.parse.quote(get_gather_message())
    assert encoded_gather in fake_create.call_args.kwargs["url"]

View file

@ -126,19 +126,22 @@ class TwilioClient:
)
self.make_call(message=message, to=to)
def make_call(self, message, to):
def make_call(self, message, to, grafana_cloud=False):
try:
start_message = message.replace('"', "")
twiml_query = urllib.parse.quote(
gather_message = (
(
f"<Response>"
f"<Say>{start_message}</Say>"
f'<Gather numDigits="1" action="{get_gather_url()}" method="POST">'
f"<Say>{get_gather_message()}</Say>"
f"</Gather>"
f"</Response>"
),
)
if not grafana_cloud
else ""
)
twiml_query = urllib.parse.quote(
f"<Response><Say>{start_message}</Say>{gather_message}</Response>",
safe="",
)

View file

@ -1,4 +1,4 @@
from rest_framework.pagination import PageNumberPagination
from rest_framework.pagination import CursorPagination, PageNumberPagination
class HundredPageSizePaginator(PageNumberPagination):
@ -11,3 +11,10 @@ class FiftyPageSizePaginator(PageNumberPagination):
class TwentyFivePageSizePaginator(PageNumberPagination):
    # Page-number pagination with a fixed page size of 25 items.
    page_size = 25
class TwentyFiveCursorPaginator(CursorPagination):
    # Cursor-based pagination: 25 items by default, callers may request up
    # to 100 via the "perpage" query parameter. Ordered by descending pk,
    # i.e. newest records first.
    page_size = 25
    max_page_size = 100
    page_size_query_param = "perpage"
    ordering = "-pk"

View file

@ -1,21 +0,0 @@
import random
from django.conf import settings
class UseRandomReadonlyDbManagerMixin:
    """
    Use this Mixin in ModelManagers, when you want to use the random readonly replica
    """

    @property
    def using_readonly_db(self):
        """Select one of the readonly databases this QuerySet should execute against."""
        readonly_databases = getattr(settings, "READONLY_DATABASES", None)
        if not readonly_databases:
            # Django uses the database with the alias of default when no other database has been selected.
            # https://docs.djangoproject.com/en/3.2/topics/db/multi-db/#defining-your-databases
            return self.using("default")
        return self.using(random.choice(list(readonly_databases)))

View file

@ -78,6 +78,9 @@ SENDGRID_INBOUND_EMAIL_DOMAIN = os.environ.get("SENDGRID_INBOUND_EMAIL_DOMAIN")
GRAFANA_CLOUD_ONCALL_API_URL = os.environ.get("GRAFANA_CLOUD_ONCALL_API_URL", "https://a-prod-us-central-0.grafana.net")
GRAFANA_CLOUD_ONCALL_TOKEN = os.environ.get("GRAFANA_CLOUD_ONCALL_TOKEN", None)
# Outgoing webhook settings
DANGEROUS_WEBHOOKS_ENABLED = getenv_boolean("DANGEROUS_WEBHOOKS_ENABLED", default=False)
# Application definition
INSTALLED_APPS = [

View file

@ -32,10 +32,6 @@ DATABASES = {
TESTING = "pytest" in sys.modules or "unittest" in sys.modules
READONLY_DATABASES = {}
# Dictionaries concatenation, introduced in python3.9
DATABASES = DATABASES | READONLY_DATABASES
CACHES = {
"default": {

View file

@ -84,12 +84,11 @@ CELERY_TASK_ROUTES = {
"apps.alerts.tasks.create_contact_points_for_datasource.create_contact_points_for_datasource": {"queue": "default"},
"apps.alerts.tasks.sync_grafana_alerting_contact_points.sync_grafana_alerting_contact_points": {"queue": "default"},
"apps.alerts.tasks.delete_alert_group.delete_alert_group": {"queue": "default"},
"apps.alerts.tasks.invalidate_web_cache_for_alert_group.invalidate_web_cache_for_alert_group": {"queue": "default"},
"apps.alerts.tasks.invalidate_web_cache_for_alert_group.invalidate_web_cache_for_alert_group": {
"queue": "default"
}, # todo: remove
"apps.alerts.tasks.send_alert_group_signal.send_alert_group_signal": {"queue": "default"},
"apps.alerts.tasks.wipe.wipe": {"queue": "default"},
# TODO: remove cache_alert_group_for_web and schedule_cache_for_alert_group once existing task will be processed
"apps.api.tasks.cache_alert_group_for_web": {"queue": "default"},
"apps.api.tasks.schedule_cache_for_alert_group": {"queue": "default"},
"apps.heartbeat.tasks.heartbeat_checkup": {"queue": "default"},
"apps.heartbeat.tasks.integration_heartbeat_checkup": {"queue": "default"},
"apps.heartbeat.tasks.process_heartbeat_task": {"queue": "default"},

View file

@ -49,3 +49,11 @@
.payloadExample {
margin-top: 24px;
}
/* Highlight the autoresolve template editor section with the link color. */
.autoresolve-condition section {
  border: 1px solid var(--primary-text-link);
}

/* Pull the label flush against the note rendered directly below it. */
.autoresolve-label {
  margin-bottom: 0 !important;
}

View file

@ -1,6 +1,7 @@
import React, { useCallback, useEffect, useMemo, useState } from 'react';
import { SelectableValue } from '@grafana/data';
import { getLocationSrv } from '@grafana/runtime';
import { Label, Button, HorizontalGroup, VerticalGroup, Select, LoadingPlaceholder } from '@grafana/ui';
import { capitalCase } from 'change-case';
import cn from 'classnames/bind';
@ -33,6 +34,7 @@ interface AlertTemplatesFormProps {
demoAlertEnabled: boolean;
handleSendDemoAlertClick: () => void;
templatesRefreshing: boolean;
selectedTemplateName?: string;
}
const AlertTemplatesForm = (props: AlertTemplatesFormProps) => {
@ -45,6 +47,7 @@ const AlertTemplatesForm = (props: AlertTemplatesFormProps) => {
demoAlertEnabled,
handleSendDemoAlertClick,
templatesRefreshing,
selectedTemplateName,
} = props;
const [tempValues, setTempValues] = useState<{
@ -117,15 +120,29 @@ const AlertTemplatesForm = (props: AlertTemplatesFormProps) => {
[groups, activeGroup]
);
const getGroupByTemplateName = (templateName: string) => {
Object.values(groups).find((group) => {
const foundTemplate = group.find((obj: any) => {
if (obj.name == templateName) {
return obj;
}
});
setActiveGroup(foundTemplate?.group);
});
};
const handleChangeActiveGroup = useCallback((group: SelectableValue) => {
setActiveGroup(group.value);
}, []);
useEffect(() => {
const groupsArr = Object.keys(groups);
if (!activeGroup && groupsArr.length) {
setActiveGroup(groupsArr[0]);
if (selectedTemplateName) {
getGroupByTemplateName(selectedTemplateName);
} else {
if (!activeGroup && groupsArr.length) {
setActiveGroup(groupsArr[0]);
}
}
}, [groups, activeGroup]);
@ -134,6 +151,7 @@ const AlertTemplatesForm = (props: AlertTemplatesFormProps) => {
setActiveTemplate(groups[activeGroup][0]);
}
}, [activeGroup]);
const getTemplatePreviewEditClickHandler = (templateName: string) => {
return () => {
const template = templatesToRender.find((template) => template.name === templateName);
@ -163,6 +181,9 @@ const AlertTemplatesForm = (props: AlertTemplatesFormProps) => {
) : null}
</HorizontalGroup>
);
const handleGoToTemplateSettingsCllick = () => {
getLocationSrv().update({ partial: true, query: { tab: 'Autoresolve' } });
};
return (
<div className={cx('root')}>
@ -203,9 +224,20 @@ const AlertTemplatesForm = (props: AlertTemplatesFormProps) => {
key={activeTemplate.name}
className={cx('template-form', {
'template-form-full': true,
'autoresolve-condition': selectedTemplateName && activeTemplate.name == 'resolve_condition_template',
})}
>
<Label>{getLabelFromTemplateName(activeTemplate.name, activeGroup)}</Label>
<Label className={cx({ 'autoresolve-label': activeTemplate.name == 'resolve_condition_template' })}>
{getLabelFromTemplateName(activeTemplate.name, activeGroup)}
</Label>
{activeTemplate.name == 'resolve_condition_template' && (
<Text type="secondary" size="small">
To activate autoresolving change integration
<Button fill="text" size="sm" onClick={handleGoToTemplateSettingsCllick}>
settings
</Button>
</Text>
)}
<MonacoJinja2Editor
value={tempValues[activeTemplate.name] ?? (templates[activeTemplate.name] || '')}
disabled={false}

View file

@ -0,0 +1,3 @@
/* Root wrapper for the CursorPagination component. */
.root {
  display: block;
}

View file

@ -0,0 +1,79 @@
import React, { FC, useCallback, useEffect, useState } from 'react';
import { SelectableValue } from '@grafana/data';
import { Button, HorizontalGroup, Icon, Select } from '@grafana/ui';
import cn from 'classnames/bind';
import Text from 'components/Text/Text';
import styles from './CursorPagination.module.css';
interface CursorPaginationProps {
  // Label for the current page, rendered between the prev/next arrows.
  current: string;
  // Invoked with the target cursor and the direction of travel.
  onChange: (cursor: string, direction: 'prev' | 'next') => void;
  itemsPerPageOptions: Array<SelectableValue<number>>;
  itemsPerPage: number;
  onChangeItemsPerPage: (value: number) => void;
  // Cursors for the adjacent pages; falsy disables the matching button.
  prev: string;
  next: string;
}
const cx = cn.bind(styles);
const CursorPagination: FC<CursorPaginationProps> = (props) => {
  const { current, onChange, prev, next, itemsPerPage, itemsPerPageOptions, onChangeItemsPerPage } = props;

  // Controls are disabled while a page change is in flight and re-enabled
  // when fresh cursors arrive from the server.
  const [disabled, setDisabled] = useState<boolean>(false);

  useEffect(() => {
    setDisabled(false);
  }, [prev, next]);

  const onChangeItemsPerPageCallback = useCallback(
    (option) => {
      setDisabled(true);
      onChangeItemsPerPage(option.value);
    },
    // Depend on the handler so the callback never invokes a stale closure.
    [onChangeItemsPerPage]
  );

  return (
    <HorizontalGroup spacing="md" justify="flex-end">
      <HorizontalGroup>
        <Text type="secondary">Items per list</Text>
        <Select
          isSearchable={false}
          options={itemsPerPageOptions}
          value={itemsPerPage}
          onChange={onChangeItemsPerPageCallback}
        />
      </HorizontalGroup>
      <HorizontalGroup>
        <Button
          aria-label="previous"
          size="sm"
          variant="secondary"
          onClick={() => {
            setDisabled(true);
            onChange(prev, 'prev');
          }}
          disabled={disabled || !prev}
        >
          <Icon name="angle-left" />
        </Button>
        <Text type="secondary">{current}</Text>
        {/* Fix: both buttons were labeled "previous"; this one moves forward. */}
        <Button
          aria-label="next"
          size="sm"
          variant="secondary"
          onClick={() => {
            setDisabled(true);
            onChange(next, 'next');
          }}
          disabled={disabled || !next}
        >
          <Icon name="angle-right" />
        </Button>
      </HorizontalGroup>
    </HorizontalGroup>
  );
};
export default CursorPagination;

View file

@ -714,16 +714,18 @@ class AlertRules extends React.Component<AlertRulesProps, AlertRulesState> {
)}
escalate to{' '}
<WithPermissionControl userAction={UserAction.UpdateAlertReceiveChannels}>
<GSelect
showSearch
modelName="escalationChainStore"
displayField="name"
placeholder="Select Escalation Chain"
className={cx('select', 'control', 'no-trigger-collapse-please')}
value={channelFilter.escalation_chain}
onChange={this.getEscalationChainChangeHandler(channelFilterId)}
showWarningIfEmptyValue={true}
/>
<div onClick={(e) => e.stopPropagation()}>
<GSelect
showSearch
modelName="escalationChainStore"
displayField="name"
placeholder="Select Escalation Chain"
className={cx('select', 'control', 'no-trigger-collapse-please')}
value={channelFilter.escalation_chain}
onChange={this.getEscalationChainChangeHandler(channelFilterId)}
showWarningIfEmptyValue={true}
/>
</div>
</WithPermissionControl>
</div>
<div onClick={(e) => e.stopPropagation()}>{this.renderChannelFilterButtons(channelFilterId, index)}</div>

View file

@ -17,10 +17,11 @@ interface TeamEditContainerProps {
onUpdate?: () => void;
onUpdateTemplates?: () => void;
visible?: boolean;
selectedTemplateName?: string;
}
const AlertTemplatesFormContainer = observer((props: TeamEditContainerProps) => {
const { alertReceiveChannelId, alertGroupId, onUpdateTemplates } = props;
const { alertReceiveChannelId, alertGroupId, onUpdateTemplates, selectedTemplateName } = props;
const store = useStore();
@ -71,6 +72,7 @@ const AlertTemplatesFormContainer = observer((props: TeamEditContainerProps) =>
demoAlertEnabled={alertReceiveChannel?.demo_alert_enabled}
handleSendDemoAlertClick={handleSendDemoAlertClickCallback}
templatesRefreshing={templatesRefreshing}
selectedTemplateName={selectedTemplateName}
/>
);
});

View file

@ -37,7 +37,7 @@ const cx = cn.bind(styles);
interface IncidentsFiltersProps extends WithStoreProps {
value: IncidentsFiltersType;
onChange: (filters: { [key: string]: any }) => void;
onChange: (filters: { [key: string]: any }, isOnMount: boolean) => void;
query: { [key: string]: any };
}
interface IncidentsFiltersState {
@ -79,7 +79,9 @@ class IncidentsFilters extends Component<IncidentsFiltersProps, IncidentsFilters
({ filters, values } = parseFilters(newQuery, filterOptions));
}
this.setState({ filterOptions, filters, values }, this.onChange);
this.setState({ filterOptions, filters, values }, () => {
this.onChange(true);
});
}
render() {
@ -421,11 +423,11 @@ class IncidentsFilters extends Component<IncidentsFiltersProps, IncidentsFilters
};
};
onChange = () => {
onChange = (isOnMount = false) => {
const { onChange } = this.props;
const { values } = this.state;
onChange(values);
onChange(values, isOnMount);
};
debouncedOnChange = debounce(this.onChange, 500);

View file

@ -1,5 +1,6 @@
import React, { useCallback, useEffect, useState } from 'react';
import { getLocationSrv, setLocationSrv } from '@grafana/runtime';
import {
Drawer,
Tab,
@ -47,6 +48,7 @@ interface IntegrationSettingsProps {
const IntegrationSettings = observer((props: IntegrationSettingsProps) => {
const { id, onHide, onUpdate, onUpdateTemplates, startTab, alertGroupId } = props;
const [activeTab, setActiveTab] = useState<IntegrationSettingsTab>(startTab || IntegrationSettingsTab.Templates);
const [selectedTemplate, setSelectedTemplate] = useState<string>('');
const store = useStore();
@ -57,6 +59,7 @@ const IntegrationSettings = observer((props: IntegrationSettingsProps) => {
const getTabClickHandler = useCallback((tab: IntegrationSettingsTab) => {
return () => {
setActiveTab(tab);
getLocationSrv().update({ partial: true, query: { tab: tab } });
};
}, []);
@ -64,9 +67,19 @@ const IntegrationSettings = observer((props: IntegrationSettingsProps) => {
alertReceiveChannelStore.updateItem(id);
}, []);
useEffect(() => {
setActiveTab(startTab || IntegrationSettingsTab.Templates);
getLocationSrv().update({ partial: true, query: { tab: startTab || IntegrationSettingsTab.Templates } });
}, [startTab]);
const integration = alertReceiveChannelStore.getIntegration(alertReceiveChannel);
const [expanded, setExpanded] = useState(false);
const handleSwitchToTemplate = (templateName: string) => {
setSelectedTemplate(templateName);
};
return (
<Drawer
scrollableContent
@ -148,6 +161,7 @@ const IntegrationSettings = observer((props: IntegrationSettingsProps) => {
onUpdate={onUpdate}
onHide={onHide}
onUpdateTemplates={onUpdateTemplates}
selectedTemplateName={selectedTemplate}
/>
)}
{activeTab === IntegrationSettingsTab.Heartbeat && (
@ -155,7 +169,13 @@ const IntegrationSettings = observer((props: IntegrationSettingsProps) => {
<HeartbeatForm alertReceveChannelId={id} onUpdate={onUpdate} />
</div>
)}
{activeTab === IntegrationSettingsTab.Autoresolve && <Autoresolve alertReceiveChannelId={id} />}
{activeTab === IntegrationSettingsTab.Autoresolve && (
<Autoresolve
alertReceiveChannelId={id}
onSwitchToTemplate={handleSwitchToTemplate}
alertGroupId={alertGroupId}
/>
)}
{/*{activeTab === IntegrationSettingsTab.LiveLogs && <LiveLogs alertReceiveChannelId={id} />}*/}
{activeTab === IntegrationSettingsTab.HowToConnect && (
<div className="container">

View file

@ -13,7 +13,7 @@
}
.team-select {
width: 300px;
width: 520px;
}
.team-select-actionbuttons {
@ -28,3 +28,15 @@
.confirmation-buttons .save-team-button {
margin-left: 8px;
}
/* Compact single-line info banner shown under the autoresolve selector. */
.autoresolve-block {
  height: 32px;
  padding: 4px 8px;
  margin-top: 8px;
  min-width: 500px;
  width: 520px;
}

/* Tints the exclamation icon inside the invalid-template warning banner. */
.warning-icon-color {
  color: var(--warning-text-color);
}

View file

@ -1,14 +1,17 @@
import React, { useCallback, useState } from 'react';
import React, { useCallback, useState, useEffect } from 'react';
import { Alert, Button, Label, Modal, Select } from '@grafana/ui';
import { getLocationSrv } from '@grafana/runtime';
import { Alert, Button, Icon, Label, Modal, Select } from '@grafana/ui';
import cn from 'classnames/bind';
import { get } from 'lodash-es';
import Block from 'components/GBlock/Block';
import PluginLink from 'components/PluginLink/PluginLink';
import Text from 'components/Text/Text';
import GSelect from 'containers/GSelect/GSelect';
import { WithPermissionControl } from 'containers/WithPermissionControl/WithPermissionControl';
import { AlertReceiveChannel } from 'models/alert_receive_channel/alert_receive_channel.types';
import { Alert as AlertType } from 'models/alertgroup/alertgroup.types';
import { Team } from 'models/team/team.types';
import { useStore } from 'state/useStore';
import { UserAction } from 'state/userAction';
@ -20,9 +23,11 @@ const cx = cn.bind(styles);
interface AutoresolveProps {
alertReceiveChannelId: AlertReceiveChannel['id'];
alertGroupId?: AlertType['pk'];
onSwitchToTemplate?: (templateName: string) => void;
}
const Autoresolve = ({ alertReceiveChannelId }: AutoresolveProps) => {
const Autoresolve = ({ alertReceiveChannelId, onSwitchToTemplate, alertGroupId }: AutoresolveProps) => {
const store = useStore();
const { alertReceiveChannelStore, grafanaTeamStore, userStore } = store;
@ -35,11 +40,36 @@ const Autoresolve = ({ alertReceiveChannelId }: AutoresolveProps) => {
const [autoresolveChanged, setAutoresolveChanged] = useState<boolean>(false);
const [autoresolveValue, setAutoresolveValue] = useState<boolean>(alertReceiveChannel?.allow_source_based_resolving);
const [showErrorOnTeamSelect, setShowErrorOnTeamSelect] = useState<boolean>(false);
const [autoresolveSelected, setAutoresolveSelected] = useState<boolean>(
alertReceiveChannel?.allow_source_based_resolving
);
const [autoresolveConditionInvalid, setAutoresolveConditionInvalid] = useState<boolean>(false);
useEffect(() => {
store.alertReceiveChannelStore.updateItem(alertReceiveChannelId);
store.alertReceiveChannelStore.updateTemplates(alertReceiveChannelId, alertGroupId);
}, [alertGroupId, alertReceiveChannelId, store]);
useEffect(() => {
const autoresolveCondition = get(
store.alertReceiveChannelStore.templates[alertReceiveChannelId],
'resolve_condition_template'
);
if (autoresolveCondition == ['invalid template']) {
setAutoresolveConditionInvalid(true);
}
}, [store.alertReceiveChannelStore.templates[alertReceiveChannelId]]);
const handleAutoresolveSelected = useCallback(
(autoresolveSelectedOption) => {
setAutoresolveChanged(true);
setAutoresolveValue(autoresolveSelectedOption?.value);
if (autoresolveSelectedOption?.value === 'true') {
setAutoresolveSelected(true);
}
if (autoresolveSelectedOption?.value === 'false') {
setAutoresolveSelected(false);
}
},
[autoresolveChanged]
);
@ -84,6 +114,11 @@ const Autoresolve = ({ alertReceiveChannelId }: AutoresolveProps) => {
}
};
const handleGoToTemplateSettingsCllick = () => {
getLocationSrv().update({ partial: true, query: { tab: 'Templates' } });
onSwitchToTemplate('resolve_condition_template');
};
return (
<>
<Block>
@ -124,12 +159,39 @@ const Autoresolve = ({ alertReceiveChannelId }: AutoresolveProps) => {
defaultValue={{ value: 'true', label: 'Automatically resolve' }}
value={autoresolveValue.toString()}
options={[
{ value: 'true', label: 'Automatically resolve' },
{ value: 'true', label: 'Resolve automatically' },
{ value: 'false', label: 'Resolve manually' },
]}
/>
</WithPermissionControl>
</div>
{autoresolveSelected && (
<>
<Block shadowed bordered className={cx('autoresolve-block')}>
<div>
<Text type="secondary" size="small">
<Icon name="info-circle" /> Incident will be automatically resolved when it matches{' '}
</Text>
<Button fill="text" size="sm" onClick={handleGoToTemplateSettingsCllick}>
autoresolve condition
</Button>
</div>
</Block>
{autoresolveConditionInvalid && (
<Block shadowed bordered className={cx('autoresolve-block')}>
<div>
<Text type="secondary" size="small">
<Icon name="exclamation-triangle" className={cx('warning-icon-color')} /> Autoresolving condition
template is invalid, please{' '}
</Text>
<Button fill="text" size="sm" onClick={handleGoToTemplateSettingsCllick}>
Edit it
</Button>
</div>
</Block>
)}
</>
)}
</div>
<div className={cx('team-select-actionbuttons')}>
<Button variant="primary" onClick={handleSaveClick}>

View file

@ -30,6 +30,8 @@ const MobileAppVerification = observer((props: MobileAppVerificationProps) => {
const userPk = (propsUserPk || userStore.currentUserPk) as User['pk'];
const user = userStore.items[userPk as User['pk']];
const isCurrent = userStore.currentUserPk === user.pk;
const action = isCurrent ? UserAction.UpdateOwnSettings : UserAction.UpdateOtherUsersSettings;
const { id = UserSettingsTab.UserInfo } = props;
@ -96,7 +98,7 @@ const MobileAppVerification = observer((props: MobileAppVerificationProps) => {
<Text>* This code is active only for a minute</Text>
</p>
<p>
<WithPermissionControl userAction={UserAction.UpdateOtherUsersSettings}>
<WithPermissionControl userAction={action}>
<Button
onClick={handleCreateMobileAppVerificationToken}
className={cx('iCal-button')}
@ -114,7 +116,7 @@ const MobileAppVerification = observer((props: MobileAppVerificationProps) => {
) : (
<>
<p>
<WithPermissionControl userAction={UserAction.UpdateOtherUsersSettings}>
<WithPermissionControl userAction={action}>
<Button
onClick={handleCreateMobileAppVerificationToken}
className={cx('iCal-button')}

View file

@ -36,7 +36,10 @@ export class AlertGroupStore extends BaseStore {
initialQuery = qs.parse(window.location.search);
@observable
incidentsPage: any = this.initialQuery.p ? Number(this.initialQuery.p) : 1;
incidentsCursor?: string;
@observable
incidentsItemsPerPage?: number;
@observable
alertsSearchResult: any = {};
@ -215,54 +218,69 @@ export class AlertGroupStore extends BaseStore {
}
@action
async updateIncidentFilters(params: any, resetPage = true) {
if (resetPage) {
this.incidentsPage = 1;
async updateIncidentFilters(params: any, keepCursor = false) {
if (!keepCursor) {
this.incidentsCursor = undefined;
}
this.incidentFilters = params;
this.updateIncidents();
}
@action
async setIncidentsPage(page: number) {
this.incidentsPage = page;
async setIncidentsCursor(cursor: string) {
this.incidentsCursor = cursor;
this.updateAlertGroups();
}
@action
async updateAlertGroups(skip_slow_rendering = true) {
this.alertGroupsLoading = skip_slow_rendering;
async setIncidentsItemsPerPage(value: number) {
this.incidentsCursor = undefined;
this.incidentsItemsPerPage = value;
const result = await makeRequest(`${this.path}`, {
this.updateAlertGroups();
}
@action
async updateAlertGroups() {
this.alertGroupsLoading = true;
const {
results,
next: nextRaw,
previous: previousRaw,
} = await makeRequest(`${this.path}`, {
params: {
...this.incidentFilters,
page: this.incidentsPage,
cursor: this.incidentsCursor,
perpage: this.incidentsItemsPerPage,
is_root: true,
skip_slow_rendering,
},
}).catch(refreshPageError);
const newAlerts = new Map(result.results.map((alert: Alert) => [alert.pk, alert]));
const prevCursor = previousRaw ? qs.parse(qs.extract(previousRaw)).cursor : previousRaw;
const nextCursor = nextRaw ? qs.parse(qs.extract(nextRaw)).cursor : nextRaw;
const newAlerts = new Map(
results.map((alert: Alert) => {
const oldAlert = this.alerts.get(alert.pk) || {};
const mergedAlertData = { ...oldAlert, ...alert };
return [alert.pk, mergedAlertData];
})
);
// @ts-ignore
this.alerts = new Map<number, Alert>([...this.alerts, ...newAlerts]);
this.alertsSearchResult['default'] = {
count: result.count,
results: result.results.map((alert: Alert) => alert.pk),
prev: prevCursor,
next: nextCursor,
results: results.map((alert: Alert) => alert.pk),
};
this.alertGroupsLoading = false;
if (skip_slow_rendering) {
const hasShortened = result.results.some((alert: Alert) => alert.short);
if (hasShortened) {
this.updateAlertGroups(false);
}
}
}
getAlertSearchResult(query: string) {
@ -273,27 +291,6 @@ export class AlertGroupStore extends BaseStore {
return this.alertsSearchResult[query].results.map((pk: Alert['pk']) => this.alerts.get(pk));
}
@action
async searchIncidents(search: string) {
const result = await makeRequest(`${this.path}`, {
params: {
search,
resolved: false,
is_root: true,
},
});
const newAlerts = new Map(result.results.map((alert: Alert) => [alert.pk, alert]));
// @ts-ignore
this.alerts = new Map<number, Alert>([...this.alerts, ...newAlerts]);
this.alertsSearchResult[search] = {
count: result.count,
results: result.results.map((alert: Alert) => alert.pk),
};
}
@action
getAlert(pk: Alert['pk']) {
return makeRequest(`${this.path}${pk}`, {}).then((alert: Alert) => {

View file

@ -42,17 +42,16 @@ export interface Alert {
title: string;
message: string;
image_url: string;
alerts: any[];
alerts?: any[];
acknowledged: boolean;
created_at: string;
acknowledged_at: string;
acknowledged_at_verbose: string;
acknowledged_by_user: User;
acknowledged_on_source: boolean;
channel: Channel;
permalink: string;
permalink?: string;
related_users: User[];
render_after_resolve_report_json: TimeLineItem[];
render_after_resolve_report_json?: TimeLineItem[];
render_for_slack: { attachments: any[] };
render_for_web: {
message: any;
@ -63,17 +62,13 @@ export interface Alert {
inside_organization_number: number;
resolved: boolean;
resolved_at: string;
resolved_at_verbose: string;
resolved_by: number;
resolved_by_user: User;
resolved_by_verbose: string;
silenced: boolean;
silenced_at: string;
silenced_at_verbose: string;
silenced_by_user: Partial<User>;
silenced_until: string;
started_at: string;
started_at_verbose: string;
last_alert_at: string;
verbose_name: string;
dependent_alert_groups: Alert[];

View file

@ -214,6 +214,7 @@ export function getActionButtons(incident: AlertType, cx: any, callbacks: { [key
key="silence"
disabled={incident.loading}
onSelect={onSilence}
buttonSize="sm"
/>
);
}

View file

@ -92,7 +92,7 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
render() {
const {
store,
query: { id },
query: { id, cursor, start, perpage },
} = this.props;
const { showIntegrationSettings, showAttachIncidentForm, notFound } = this.state;
@ -112,7 +112,7 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
<VerticalGroup spacing="lg" align="center">
<Text.Title level={1}>404</Text.Title>
<Text.Title level={4}>Incident not found</Text.Title>
<PluginLink query={{ page: 'incidents' }}>
<PluginLink query={{ page: 'incidents', cursor, start, perpage }}>
<Button variant="secondary" icon="arrow-left" size="md">
Go to incidents page
</Button>
@ -182,7 +182,7 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
renderHeader = () => {
const {
store,
query: { id },
query: { id, cursor, start, perpage },
} = this.props;
const { alerts } = store.alertGroupStore;
@ -197,7 +197,7 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
<Block withBackground>
<VerticalGroup>
<HorizontalGroup className={cx('title')}>
<PluginLink query={{ page: 'incidents' }}>
<PluginLink query={{ page: 'incidents', cursor, start, perpage }}>
<IconButton name="arrow-left" size="xxl" />
</PluginLink>
{/* @ts-ignore*/}
@ -293,7 +293,13 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
};
renderIncident = (incident: Alert) => {
const m = moment(incident.last_alert_at || incident.created_at);
let datetimeReference;
if (incident.last_alert_at || incident.created_at) {
const m = moment(incident.last_alert_at || incident.created_at);
datetimeReference = `(${m.fromNow()}, ${m.toString()})`;
}
return (
<div key={incident.pk} className={cx('incident')}>
<HorizontalGroup wrap>
@ -302,9 +308,7 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
? `#${incident.inside_organization_number} ${incident.render_for_web.title}`
: incident.render_for_web.title}
</Text.Title>
<Text type="secondary">
({m.fromNow()}, {m.toString()})
</Text>
<Text type="secondary">{datetimeReference}</Text>
</HorizontalGroup>
<div
className={cx('message')}
@ -326,6 +330,9 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
const incident = store.alertGroupStore.alerts.get(id);
const alerts = incident.alerts;
if (!alerts) {
return null;
}
const latestAlert = alerts[alerts.length - 1];
const latestAlertMoment = moment(latestAlert.created_at);
@ -407,6 +414,10 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
const incident = store.alertGroupStore.alerts.get(id);
if (!incident.render_after_resolve_report_json) {
return null;
}
const timeline = this.filterTimeline(incident.render_after_resolve_report_json);
const { timelineFilter, resolutionNoteText } = this.state;
const isResolutionNoteTextEmpty = resolutionNoteText === '';

View file

@ -34,3 +34,8 @@
height: 24px;
margin-right: 0;
}
.pagination {
width: 100%;
margin-top: 20px;
}

View file

@ -11,6 +11,7 @@ import moment from 'moment';
import Emoji from 'react-emoji-render';
import CardButton from 'components/CardButton/CardButton';
import CursorPagination from 'components/CursorPagination/CursorPagination';
import GTable from 'components/GTable/GTable';
import IntegrationLogo from 'components/IntegrationLogo/IntegrationLogo';
import PluginLink from 'components/PluginLink/PluginLink';
@ -35,7 +36,10 @@ import styles from './Incidents.module.css';
const cx = cn.bind(styles);
const ITEMS_PER_PAGE = 50;
interface Pagination {
start: number;
end: number;
}
function withSkeleton(fn: (alert: AlertType) => ReactElement | ReactElement[]) {
return (alert: AlertType) => {
@ -53,28 +57,41 @@ interface IncidentsPageState {
selectedIncidentIds: Array<Alert['pk']>;
affectedRows: { [key: string]: boolean };
filters?: IncidentsFiltersType;
pagination: Pagination;
}
const ITEMS_PER_PAGE = 25;
@observer
class Incidents extends React.Component<IncidentsPageProps, IncidentsPageState> {
constructor(props: IncidentsPageProps) {
super(props);
const { store } = props;
const {
store,
query: { id, cursor: cursorQuery, start: startQuery, perpage: perpageQuery },
} = props;
const cursor = cursorQuery || undefined;
const start = !isNaN(startQuery) ? Number(startQuery) : 1;
const itemsPerPage = !isNaN(perpageQuery) ? Number(perpageQuery) : ITEMS_PER_PAGE;
store.alertGroupStore.incidentsCursor = cursor;
store.alertGroupStore.incidentsItemsPerPage = itemsPerPage;
this.state = {
selectedIncidentIds: [],
affectedRows: {},
pagination: {
start,
end: start + itemsPerPage - 1,
},
};
store.alertGroupStore.updateBulkActions();
store.alertGroupStore.updateSilenceOptions();
}
async componentDidMount() {}
componentDidUpdate() {}
render() {
return (
<div className={cx('root')}>
@ -95,24 +112,55 @@ class Incidents extends React.Component<IncidentsPageProps, IncidentsPageState>
);
}
handleFiltersChange = (filters: IncidentsFiltersType) => {
handleFiltersChange = (filters: IncidentsFiltersType, isOnMount: boolean) => {
const { store } = this.props;
this.setState({ filters, selectedIncidentIds: [] });
this.setState({
filters,
selectedIncidentIds: [],
});
store.alertGroupStore.updateIncidentFilters(filters, true);
if (!isOnMount) {
this.setState({
pagination: {
start: 1,
end: store.alertGroupStore.incidentsItemsPerPage,
},
});
}
getLocationSrv().update({ query: { page: 'incidents', ...store.incidentFilters, p: store.incidentsPage } }); // todo fix
store.alertGroupStore.updateIncidentFilters(filters, isOnMount);
getLocationSrv().update({ query: { page: 'incidents', ...store.alertGroupStore.incidentFilters } });
};
onChangePagination = (page: number) => {
onChangeCursor = (cursor: string, direction: 'prev' | 'next') => {
const { store } = this.props;
store.alertGroupStore.setIncidentsPage(page);
store.alertGroupStore.setIncidentsCursor(cursor);
this.setState({ selectedIncidentIds: [] });
this.setState({
selectedIncidentIds: [],
pagination: {
start:
this.state.pagination.start + store.alertGroupStore.incidentsItemsPerPage * (direction === 'prev' ? -1 : 1),
end: this.state.pagination.end + store.alertGroupStore.incidentsItemsPerPage * (direction === 'prev' ? -1 : 1),
},
});
};
getLocationSrv().update({ partial: true, query: { p: store.incidentsPage } });
handleChangeItemsPerPage = (value: number) => {
const { store } = this.props;
store.alertGroupStore.setIncidentsItemsPerPage(value);
this.setState({
selectedIncidentIds: [],
pagination: {
start: 1,
end: store.alertGroupStore.incidentsItemsPerPage,
},
});
};
renderBulkActions = () => {
@ -214,7 +262,7 @@ class Incidents extends React.Component<IncidentsPageProps, IncidentsPageState>
};
renderTable() {
const { selectedIncidentIds, affectedRows } = this.state;
const { selectedIncidentIds, affectedRows, pagination } = this.state;
const { store } = this.props;
const {
teamStore: { currentTeam },
@ -222,7 +270,8 @@ class Incidents extends React.Component<IncidentsPageProps, IncidentsPageState>
const { alertGroupsLoading } = store.alertGroupStore;
const results = store.alertGroupStore.getAlertSearchResult('default');
const count = get(store.alertGroupStore.alertsSearchResult, `default.count`);
const prev = get(store.alertGroupStore.alertsSearchResult, `default.prev`);
const next = get(store.alertGroupStore.alertsSearchResult, `default.next`);
if (results && !results.length) {
return (
@ -319,12 +368,22 @@ class Incidents extends React.Component<IncidentsPageProps, IncidentsPageState>
data={results}
columns={columns}
// rowClassName={getUserRowClassNameFn(userPkToEdit, userStore.currentUserPk)}
pagination={{
page: store.incidentsPage,
total: Math.ceil((count || 0) / ITEMS_PER_PAGE),
onChange: this.onChangePagination,
}}
/>
<div className={cx('pagination')}>
<CursorPagination
current={`${pagination.start}-${pagination.end}`}
itemsPerPage={store.alertGroupStore.incidentsItemsPerPage}
itemsPerPageOptions={[
{ label: '25', value: 25 },
{ label: '50', value: 50 },
{ label: '100', value: 100 },
]}
prev={prev}
next={next}
onChange={this.onChangeCursor}
onChangeItemsPerPage={this.handleChangeItemsPerPage}
/>
</div>
</div>
);
}
@ -338,9 +397,20 @@ class Incidents extends React.Component<IncidentsPageProps, IncidentsPageState>
}
renderTitle = (record: AlertType) => {
const { store } = this.props;
const {
pagination: { start },
} = this.state;
const { incidentsItemsPerPage, incidentsCursor } = store.alertGroupStore;
return (
<VerticalGroup spacing="none" justify="center">
<PluginLink query={{ page: 'incident', id: record.pk }}>{record.render_for_web.title}</PluginLink>
<PluginLink
query={{ page: 'incident', id: record.pk, cursor: incidentsCursor, perpage: incidentsItemsPerPage, start }}
>
{record.render_for_web.title}
</PluginLink>
{Boolean(record.dependent_alert_groups.length) && `+ ${record.dependent_alert_groups.length} attached`}
</VerticalGroup>
);
@ -366,48 +436,6 @@ class Incidents extends React.Component<IncidentsPageProps, IncidentsPageState>
renderStatus(record: AlertType) {
return getIncidentStatusTag(record);
/*if (record.resolved) {
return (
<div className={cx('status')}>
<Tooltip title={`Resolved ${record.resolved_at_verbose}`}>
<CheckCircleOutlined className={cx('icon-small')} style={{ color: '#52c41a' }} />
</Tooltip>
</div>
);
}
if (record.acknowledged) {
return (
<div className={cx('status')}>
<Tooltip title={`Acknowledged ${record.acknowledged_at_verbose}`}>
<Icon className={cx('icon-small')} component={AcknowledgedIncidentIcon} />
</Tooltip>
</div>
);
}
if (record.silenced) {
const silencedUntilText = record.silenced_until
? `Silenced until ${moment(record.silenced_until).toLocaleString()}`
: 'Silenced forever';
return (
<div className={cx('status')}>
<Tooltip title={silencedUntilText}>
<Icon className={cx('icon-small')} component={SilencedIncidentIcon} />
</Tooltip>
</div>
);
}
return (
<div className={cx('status')}>
<Tooltip title={`Started ${record.started_at_verbose}`}>
<Icon className={cx('icon-small')} component={NewIncidentIcon} />
</Tooltip>
</div>
);*/
}
renderStartedAt(alert: AlertType) {

View file

@ -12,10 +12,11 @@ interface SilenceDropdownProps {
onSelect: (value: number) => void;
className?: string;
disabled?: boolean;
buttonSize?: string;
}
const SilenceDropdown = observer((props: SilenceDropdownProps) => {
const { onSelect, className, disabled = false } = props;
const { onSelect, className, disabled = false, buttonSize } = props;
const onSelectCallback = useCallback(
([value, ...rest]) => {
@ -43,6 +44,7 @@ const SilenceDropdown = observer((props: SilenceDropdownProps) => {
label: silenceOption.display_name,
}))}
value={undefined}
buttonProps={{ size: buttonSize }}
>
Silence
</ButtonCascader>

View file

@ -74,6 +74,10 @@ class Integrations extends React.Component<IntegrationsProps, IntegrationsState>
`Integration with id=${query?.id} is not found. Please select integration from the list.`
);
}
if (query.tab) {
this.setState({ integrationSettingsTab: query.tab });
this.setState({ alertReceiveChannelToShowSettings: query.id });
}
}
if (!selectedAlertReceiveChannel) {
selectedAlertReceiveChannel = searchResult[0]?.id;
@ -90,6 +94,9 @@ class Integrations extends React.Component<IntegrationsProps, IntegrationsState>
if (this.props.query.id !== prevProps.query.id) {
this.parseQueryParams();
}
if (this.props.query.tab !== prevProps.query.tab) {
this.parseQueryParams();
}
}
componentWillUnmount() {
@ -200,12 +207,13 @@ class Integrations extends React.Component<IntegrationsProps, IntegrationsState>
}}
startTab={integrationSettingsTab}
id={alertReceiveChannelToShowSettings}
onHide={() =>
onHide={() => {
this.setState({
alertReceiveChannelToShowSettings: undefined,
integrationSettingsTab: undefined,
})
}
});
getLocationSrv().update({ partial: true, query: { tab: undefined } });
}}
/>
)}
{showCreateIntegrationModal && (

View file

@ -23,14 +23,17 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
- hosts:
- {{ .Values.base_url | quote }}
secretName: certificate-tls
{{- tpl (toYaml .Values.ingress.tls) . | nindent 4 }}
{{- end }}
rules:
- host: {{ .Values.base_url | quote }}
http:
paths:
{{- if .Values.ingress.extraPaths }}
{{ toYaml .Values.ingress.extraPaths | indent 6}}
{{- end }}
- path: /
pathType: Prefix
backend:

View file

@ -54,6 +54,25 @@ ingress:
annotations:
kubernetes.io/ingress.class: "nginx"
cert-manager.io/issuer: "letsencrypt-prod"
tls:
- hosts:
- "{{ .Values.base_url }}"
secretName: certificate-tls
# Extra paths to prepend to the host configuration. If using something
# like an ALB ingress controller, you may want to configure SSL redirects
extraPaths: []
# - path: /*
# backend:
# serviceName: ssl-redirect
# servicePort: use-annotation
## Or for k8s > 1.19
# - path: /*
# pathType: Prefix
# backend:
# service:
# name: ssl-redirect
# port:
# name: use-annotation
# Whether to install ingress controller
ingress-nginx:

View file

@ -57,7 +57,9 @@ def create_integration(
integration = oncall_api_client.create("integrations", payload)
default_route_id = integration["default_route_id"]
routes = oncall_api_client.list_all("routes/?integration_id={}".format(integration["id"]))
default_route_id = routes[0]["id"]
oncall_api_client.update(
f"routes/{default_route_id}", {"escalation_chain_id": escalation_chain_id}
)