This commit is contained in:
Joey Orlando 2023-12-28 09:37:08 -05:00 committed by GitHub
commit b6a4448112
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
59 changed files with 856 additions and 266 deletions

2
.gitignore vendored
View file

@ -10,3 +10,5 @@ venv
yarn.lock
node_modules
test-results

8
.prettierrc.js Normal file
View file

@ -0,0 +1,8 @@
overrides: [
{
files: ["*.yml", "*.yaml"],
options: {
singleQuote: false,
},
},
];

View file

@ -5,6 +5,29 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
## v1.3.81 (2023-12-28)
### Added
- Support e2e tests in Tilt and Makefile ([#3516](https://github.com/grafana/oncall/pull/3516))
- Support PATCH method for outgoing webhooks by @ravishankar15 ([#3580](https://github.com/grafana/oncall/pull/3580))
### Changed
- Limit acknowledge reminders to stop repeating after 1 month @mderynck ([#3571](https://github.com/grafana/oncall/pull/3571))
### Fixed
- Check reason to skip notification in Slack to avoid task perform_notification retries @Ferril ([#3562](https://github.com/grafana/oncall/pull/3562))
- Fix alert group table columns validation @Ferril ([#3577](https://github.com/grafana/oncall/pull/3577))
- Fix posting message about rate limit to Slack @Ferril ([#3582](https://github.com/grafana/oncall/pull/3582))
- Fix issue with parsing sender email address from email message for inbound email integration endpoint @Ferril ([#3586](https://github.com/grafana/oncall/pull/3586))
- Fix PUT /api/v1/escalation_policies/id issue when updating `from_time` and `to_time` by @joeyorlando ([#3581](https://github.com/grafana/oncall/pull/3581))
- Fix issue where duplicate team options would show up in the teams dropdown for the `/escalate` Slack command
by @joeyorlando ([#3590](https://github.com/grafana/oncall/pull/3590))
## v1.3.80 (2023-12-14)
### Added

View file

@ -197,6 +197,15 @@ engine-manage: ## run Django's `manage.py` script, inside of a docker container
## https://docs.djangoproject.com/en/4.1/ref/django-admin/#django-admin-makemigrations
$(call run_engine_docker_command,python manage.py $(CMD))
# NOTE(review): these four targets are commands, not files — they should be
# listed in .PHONY (presumably declared elsewhere in this Makefile; confirm).
test-e2e: ## run the e2e tests in headless mode
	yarn --cwd grafana-plugin test:e2e
test-e2e-watch: ## start e2e tests in watch mode
	yarn --cwd grafana-plugin test:e2e:watch
test-e2e-show-report: ## open last e2e test report
	yarn --cwd grafana-plugin playwright show-report
ui-test: ## run the UI tests
	$(call run_ui_docker_command,yarn test)

View file

@ -1,3 +1,4 @@
load('ext://uibutton', 'cmd_button', 'location', 'text_input', 'bool_input')
running_under_parent_tiltfile = os.getenv("TILT_PARENT", "false") == "true"
# The user/pass that you will login to Grafana with
grafana_admin_user_pass = os.getenv("GRAFANA_ADMIN_USER_PASS", "oncall")
@ -36,7 +37,7 @@ docker_build_sub(
"localhost:63628/oncall/engine:dev",
context="./engine",
cache_from=["grafana/oncall:latest", "grafana/oncall:dev"],
ignore=["./grafana-plugin/test-results/", "./grafana-plugin/dist/", "./grafana-plugin/e2e-tests/"],
ignore=["./test-results/", "./grafana-plugin/dist/", "./grafana-plugin/e2e-tests/"],
child_context=".",
target="dev",
extra_cmds=["ADD ./grafana-plugin/src/plugin.json /etc/grafana-plugin/src/plugin.json"],
@ -54,10 +55,56 @@ local_resource(
"build-ui",
labels=["OnCallUI"],
cmd="cd grafana-plugin && yarn install && yarn build:dev",
serve_cmd="cd grafana-plugin && ONCALL_API_URL=http://oncall-dev-engine:8080 yarn watch",
serve_cmd="cd grafana-plugin && yarn watch",
allow_parallel=True,
)
local_resource(
"e2e-tests",
labels=["E2eTests"],
cmd="cd grafana-plugin && yarn test:e2e",
trigger_mode=TRIGGER_MODE_MANUAL,
auto_init=False,
resource_deps=["build-ui", "grafana", "grafana-oncall-app-provisioning-configmap", "engine"]
)
cmd_button(
name="E2E Tests - headless run",
argv=["sh", "-c", "yarn --cwd ./grafana-plugin test:e2e $STOP_ON_FIRST_FAILURE"],
text="Restart headless run",
resource="e2e-tests",
icon_name="replay",
inputs=[
text_input("BROWSERS", "Browsers (e.g. \"chromium,firefox,webkit\")", "chromium", "chromium,firefox,webkit"),
bool_input("REPORTER", "Use HTML reporter", True, 'html', 'line'),
bool_input("STOP_ON_FIRST_FAILURE", "Stop on first failure", True, "-x", ""),
]
)
cmd_button(
name="E2E Tests - open watch mode",
argv=["sh", "-c", "yarn --cwd grafana-plugin test:e2e:watch"],
text="Open watch mode",
resource="e2e-tests",
icon_name="visibility",
)
cmd_button(
name="E2E Tests - show report",
argv=["sh", "-c", "yarn --cwd grafana-plugin playwright show-report"],
text="Show last HTML report",
resource="e2e-tests",
icon_name="assignment",
)
cmd_button(
name="E2E Tests - stop current run",
argv=["sh", "-c", "kill -9 $(pgrep -f test:e2e)"],
text="Stop",
resource="e2e-tests",
icon_name="dangerous",
)
yaml = helm("helm/oncall", name=HELM_PREFIX, values=["./dev/helm-local.yml", "./dev/helm-local.dev.yml"])
k8s_yaml(yaml)

View file

@ -243,13 +243,18 @@ are run on pull request CI builds. New features should ideally include a new/mod
To run these tests locally simply do the following:
```bash
npx playwright install # install playwright dependencies
cp ./grafana-plugin/e2e-tests/.env.example ./grafana-plugin/e2e-tests/.env
# you may need to tweak the values in ./grafana-plugin/e2e-tests/.env according to your local setup
cd grafana-plugin
yarn test:e2e
```
1. Install Playwright dependencies with `npx playwright install`
2. [Launch the environment](#launch-the-environment)
3. Then you interact with tests in 2 different ways:
1. Using `Tilt` - open _E2eTests_ section where you will find 4 buttons:
1. Restart headless run (you can configure browsers, reporter and failure allowance there)
2. Open watch mode
3. Show last HTML report
4. Stop (stops any pending e2e test process)
2. Using `make`:
1. `make test-e2e` to start headless run
2. `make test-e2e-watch` to open watch mode
3. `make test-e2e-show-report` to open last HTML report
## Helm unit tests

View file

@ -1,4 +1,4 @@
base_url: localhost:30001
base_url: localhost:8080
base_url_protocol: http
env:
- name: GRAFANA_CLOUD_NOTIFICATIONS_ENABLED

View file

@ -5,6 +5,7 @@ import typing
import pytz
from celery import uuid as celery_uuid
from dateutil.parser import parse
from django.utils import timezone
from django.utils.functional import cached_property
from rest_framework.exceptions import ValidationError
@ -212,6 +213,12 @@ class EscalationSnapshotMixin:
return False
return self.raw_escalation_snapshot.get("pause_escalation", False)
@property
def last_active_escalation_policy_order(self) -> typing.Optional[int]:
    """Order of the most recently executed escalation policy step.

    Read from the raw escalation snapshot; returns ``None`` when there is no
    snapshot or the snapshot does not carry the key.
    """
    snapshot = self.raw_escalation_snapshot
    return snapshot.get("last_active_escalation_policy_order") if snapshot else None
@property
def next_step_eta(self) -> typing.Optional[datetime.datetime]:
"""
@ -223,6 +230,19 @@ class EscalationSnapshotMixin:
raw_next_step_eta = self.raw_escalation_snapshot.get("next_step_eta")
return None if not raw_next_step_eta else parse(raw_next_step_eta).replace(tzinfo=pytz.UTC)
def next_step_eta_is_valid(self) -> typing.Optional[bool]:
    """Sanity-check ``next_step_eta`` against the current time.

    ``next_step_eta`` is expected to be refreshed as the escalation policy is
    executed, so it should never fall more than five minutes behind the
    current time; if it does, an escalation policy step was missed or is
    substantially delayed.

    Returns ``None`` when ``next_step_eta`` is unset, otherwise the boolean
    result of the time comparison (``True`` means still valid).
    """
    eta = self.next_step_eta
    if eta is None:
        return None
    cutoff = timezone.now() - datetime.timedelta(minutes=5)
    return eta > cutoff
def update_next_step_eta(self, increase_by_timedelta: datetime.timedelta) -> typing.Optional[dict]:
"""
update next_step_eta field directly to avoid serialization overhead

View file

@ -3,7 +3,6 @@ import logging
import typing
from celery.utils.log import get_task_logger
from django.utils import timezone
from apps.alerts.escalation_snapshot.serializers import EscalationSnapshotSerializer
from apps.alerts.models.alert_group_log_record import AlertGroupLogRecord
@ -90,19 +89,6 @@ class EscalationSnapshot:
return []
return self.escalation_policies_snapshots[: self.last_active_escalation_policy_order + 1]
def next_step_eta_is_valid(self) -> typing.Optional[bool]:
    """
    `next_step_eta` should never be less than the current time (with a 5 minute buffer provided),
    as this field is updated as the escalation policy is executed over time. If it is less, this
    means that an escalation policy step has been missed, or is substantially delayed.

    If `next_step_eta` is `None` then `None` is returned, otherwise a boolean is returned
    representing the result of the time comparison (`True` means still valid).
    """
    if self.next_step_eta is None:
        return None
    return self.next_step_eta > (timezone.now() - datetime.timedelta(minutes=5))
def save_to_alert_group(self) -> None:
self.alert_group.raw_escalation_snapshot = self.convert_to_dict()
self.alert_group.save(update_fields=["raw_escalation_snapshot"])

View file

@ -489,6 +489,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
AlertGroup.ACCOUNT_INACTIVE,
AlertGroup.RATE_LIMITED,
AlertGroup.CHANNEL_NOT_SPECIFIED,
AlertGroup.RESTRICTED_ACTION,
)
def is_alert_a_resolve_signal(self, alert):

View file

@ -1,7 +1,9 @@
from datetime import timedelta
from functools import partial
from django.conf import settings
from django.db import transaction
from django.utils import timezone
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
@ -61,6 +63,11 @@ def acknowledge_reminder_task(alert_group_pk: int, unacknowledge_process_id: str
(alert_group.pk, unacknowledge_process_id), countdown=unacknowledge_timeout
)
else:
if alert_group.started_at < timezone.now() - timedelta(days=settings.ACKNOWLEDGE_REMINDER_TASK_EXPIRY_DAYS):
task_logger.info(
f"alert group {alert_group_pk} not renewing acknowledgement reminder, started_at is too old. {log_info}"
)
return
acknowledge_reminder_task.apply_async(
(alert_group.pk, unacknowledge_process_id), countdown=acknowledge_reminder_timeout
)

View file

@ -4,7 +4,7 @@ import typing
import requests
from celery import shared_task
from django.conf import settings
from django.db.models import Avg, F, Max
from django.db.models import Avg, F, Max, Q
from django.utils import timezone
from apps.alerts.tasks.task_logger import task_logger
@ -29,26 +29,26 @@ def send_alert_group_escalation_auditor_task_heartbeat() -> None:
def audit_alert_group_escalation(alert_group: "AlertGroup") -> None:
escalation_snapshot = alert_group.escalation_snapshot
raw_escalation_snapshot: dict = alert_group.raw_escalation_snapshot
alert_group_id = alert_group.id
base_msg = f"Alert group {alert_group_id}"
if not alert_group.escalation_chain_exists:
if not raw_escalation_snapshot:
msg = f"{base_msg} does not have an escalation snapshot associated with it, this should never occur"
task_logger.warning(msg)
raise AlertGroupEscalationPolicyExecutionAuditException(msg)
if not raw_escalation_snapshot.get("escalation_chain_snapshot"):
task_logger.info(
f"{base_msg} does not have an escalation chain associated with it, and therefore it is expected "
"that it will not have an escalation snapshot, skipping further validation"
)
return
if not escalation_snapshot:
msg = f"{base_msg} does not have an escalation snapshot associated with it, this should never occur"
task_logger.warning(msg)
raise AlertGroupEscalationPolicyExecutionAuditException(msg)
task_logger.info(f"{base_msg} has an escalation snapshot associated with it, auditing if it executed properly")
escalation_policies_snapshots = escalation_snapshot.escalation_policies_snapshots
escalation_policies_snapshots = raw_escalation_snapshot.get("escalation_policies_snapshots")
if not escalation_policies_snapshots:
task_logger.info(
@ -59,18 +59,19 @@ def audit_alert_group_escalation(alert_group: "AlertGroup") -> None:
f"{base_msg}'s escalation snapshot has a populated escalation_policies_snapshots, continuing validation"
)
if escalation_snapshot.next_step_eta_is_valid() is False:
msg = (
f"{base_msg}'s escalation snapshot does not have a valid next_step_eta: {escalation_snapshot.next_step_eta}"
)
if alert_group.next_step_eta_is_valid() is False:
msg = f"{base_msg}'s escalation snapshot does not have a valid next_step_eta: {alert_group.next_step_eta}"
task_logger.warning(msg)
raise AlertGroupEscalationPolicyExecutionAuditException(msg)
task_logger.info(f"{base_msg}'s escalation snapshot has a valid next_step_eta: {escalation_snapshot.next_step_eta}")
task_logger.info(f"{base_msg}'s escalation snapshot has a valid next_step_eta: {alert_group.next_step_eta}")
executed_escalation_policy_snapshots = escalation_snapshot.executed_escalation_policy_snapshots
num_of_executed_escalation_policy_snapshots = len(executed_escalation_policy_snapshots)
num_of_executed_escalation_policy_snapshots = (
alert_group.last_active_escalation_policy_order + 1
if alert_group.last_active_escalation_policy_order is not None
else 0
)
if num_of_executed_escalation_policy_snapshots == 0:
task_logger.info(
@ -81,9 +82,39 @@ def audit_alert_group_escalation(alert_group: "AlertGroup") -> None:
f"{base_msg}'s escalation snapshot has {num_of_executed_escalation_policy_snapshots} executed escalation policies"
)
check_personal_notifications_task.apply_async((alert_group_id,))
task_logger.info(f"{base_msg} passed the audit checks")
@shared_task
def check_personal_notifications_task(alert_group_id) -> None:
    """Audit that personal notifications for an alert group completed.

    Counts NOTIFY-step notifications triggered more than five minutes ago and
    compares them against the number that reached a terminal state (failed or
    succeeded). Only logs the outcome for now; see TODO below.
    """
    from apps.base.models import UserNotificationPolicy, UserNotificationPolicyLogRecord

    # All NOTIFY-step log records for this alert group.
    notify_records = UserNotificationPolicyLogRecord.objects.filter(
        alert_group_id=alert_group_id,
        notification_step=UserNotificationPolicy.Step.NOTIFY,
    )
    # Triggered long enough ago (> 5 minutes) that a completion record is expected by now.
    triggered = notify_records.filter(
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
        created_at__lte=timezone.now() - timezone.timedelta(minutes=5),
    ).count()
    # Terminal outcomes: either failure or success counts as completed.
    completed = notify_records.filter(
        Q(type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED)
        | Q(type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_SUCCESS)
    ).count()

    base_msg = f"Alert group {alert_group_id}"
    delta = triggered - completed
    if delta > 0:
        # TODO: when success notifications are setup for every backend, raise exception here
        task_logger.info(f"{base_msg} has ({delta}) uncompleted personal notifications")
    else:
        task_logger.info(f"{base_msg} personal notifications check passed")
@shared_task
def check_escalation_finished_task() -> None:
"""

View file

@ -287,23 +287,33 @@ def perform_notification(log_record_pk):
# Code below is not consistent.
# We check various slack reasons to skip escalation in this task, in send_slack_notification,
# before and after posting of slack message.
if alert_group.reason_to_skip_escalation == alert_group.RATE_LIMITED:
if alert_group.skip_escalation_in_slack:
notification_error_code = UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK
if alert_group.reason_to_skip_escalation == alert_group.RATE_LIMITED:
notification_error_code = UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_RATELIMIT
elif alert_group.reason_to_skip_escalation == alert_group.CHANNEL_ARCHIVED:
notification_error_code = (
UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_CHANNEL_IS_ARCHIVED
)
elif alert_group.reason_to_skip_escalation == alert_group.ACCOUNT_INACTIVE:
notification_error_code = UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_TOKEN_ERROR
task_logger.debug(
f"send_slack_notification for alert_group {alert_group.pk} failed because of slack ratelimit."
f"send_slack_notification for alert_group {alert_group.pk} failed because escalation in slack is "
f"skipped, reason: '{alert_group.get_reason_to_skip_escalation_display()}'"
)
UserNotificationPolicyLogRecord(
author=user,
type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
notification_policy=notification_policy,
reason="Slack ratelimit",
reason=f"Skipped escalation in Slack, reason: '{alert_group.get_reason_to_skip_escalation_display()}'",
alert_group=alert_group,
notification_step=notification_policy.step,
notification_channel=notification_channel,
notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_RATELIMIT,
notification_error_code=notification_error_code,
).save()
return
if alert_group.notify_in_slack_enabled is True and not log_record.slack_prevent_posting:
if alert_group.notify_in_slack_enabled is True:
# we cannot notify users in Slack if their team does not have Slack integration
if alert_group.channel.organization.slack_team_identity is None:
task_logger.debug(
@ -322,6 +332,22 @@ def perform_notification(log_record_pk):
).save()
return
if log_record.slack_prevent_posting:
task_logger.debug(
f"send_slack_notification for alert_group {alert_group.pk} failed because slack posting is disabled."
)
UserNotificationPolicyLogRecord(
author=user,
type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
notification_policy=notification_policy,
reason="Prevented from posting in Slack",
alert_group=alert_group,
notification_step=notification_policy.step,
notification_channel=notification_channel,
notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_POSTING_TO_SLACK_IS_DISABLED,
).save()
return
retry_timeout_hours = 1
if alert_group.slack_message:
alert_group.slack_message.send_slack_notification(user, alert_group, notification_policy)

View file

@ -1,3 +1,4 @@
from datetime import timedelta
from unittest.mock import patch
import pytest
@ -339,3 +340,26 @@ def test_unacknowledge_timeout_task_skip_deleted_org(
mock_acknowledge_reminder_task.assert_not_called()
assert not alert_group.log_records.exists()
@patch.object(acknowledge_reminder_task, "apply_async")
@patch.object(unacknowledge_timeout_task, "apply_async")
@pytest.mark.django_db
def test_ack_reminder_cancel_too_old(
    mock_unacknowledge_timeout_task,
    mock_acknowledge_reminder_task,
    ack_reminder_test_setup,
    settings,
):
    """Acknowledge reminders stop renewing once the alert group is too old.

    An alert group whose ``started_at`` predates the expiry window
    (``ACKNOWLEDGE_REMINDER_TASK_EXPIRY_DAYS``) must cause
    ``acknowledge_reminder_task`` to return early: no reschedule of itself,
    no unacknowledge timeout, and no log records written.

    Fix: the two mock parameters were previously named in the wrong order —
    the innermost ``@patch`` supplies the FIRST argument, so parameter 1 is
    the ``unacknowledge_timeout_task`` mock. Harmless before (both mocks only
    get ``assert_not_called``), but the names now match the actual binding.
    """
    organization, alert_group, user = ack_reminder_test_setup(
        unacknowledge_timeout=Organization.UNACKNOWLEDGE_TIMEOUT_NEVER
    )
    # Push started_at just past the expiry window so the task takes the early-return path.
    alert_group.started_at = timezone.now() - timedelta(days=settings.ACKNOWLEDGE_REMINDER_TASK_EXPIRY_DAYS + 1)
    alert_group.save()

    acknowledge_reminder_task(alert_group.pk, TASK_ID)

    mock_unacknowledge_timeout_task.assert_not_called()
    mock_acknowledge_reminder_task.assert_not_called()
    assert not alert_group.log_records.exists()

View file

@ -5,12 +5,15 @@ import requests
from django.test import override_settings
from django.utils import timezone
from apps.alerts.models import EscalationPolicy
from apps.alerts.tasks.check_escalation_finished import (
AlertGroupEscalationPolicyExecutionAuditException,
audit_alert_group_escalation,
check_escalation_finished_task,
check_personal_notifications_task,
send_alert_group_escalation_auditor_task_heartbeat,
)
from apps.base.models import UserNotificationPolicy, UserNotificationPolicyLogRecord
MOCKED_HEARTBEAT_URL = "https://hello.com/lsdjjkf"
@ -85,7 +88,7 @@ def test_send_alert_group_escalation_auditor_task_heartbeat_raises_an_exception_
@pytest.mark.django_db
def test_audit_alert_group_escalation_skips_validation_if_the_alert_group_does_not_have_an_escalation_chain(
def test_audit_alert_group_escalation_skips_validation_if_the_alert_group_does_not_have_an_escalation_chain_snapshot(
make_organization_and_user,
make_alert_receive_channel,
make_alert_group,
@ -94,10 +97,10 @@ def test_audit_alert_group_escalation_skips_validation_if_the_alert_group_does_n
alert_receive_channel = make_alert_receive_channel(organization)
alert_group = make_alert_group(alert_receive_channel)
alert_group.escalation_snapshot = None
alert_group.raw_escalation_snapshot = {"escalation_chain_snapshot": None}
alert_group.save()
assert alert_group.escalation_chain_exists is False
assert alert_group.raw_escalation_snapshot["escalation_chain_snapshot"] is None
try:
audit_alert_group_escalation(alert_group)
@ -110,7 +113,8 @@ def test_audit_alert_group_escalation_raises_exception_if_the_alert_group_does_n
escalation_snapshot_test_setup,
):
alert_group, _, _, _ = escalation_snapshot_test_setup
alert_group.escalation_snapshot = None
alert_group.raw_escalation_snapshot = None
alert_group.save()
with pytest.raises(AlertGroupEscalationPolicyExecutionAuditException):
audit_alert_group_escalation(alert_group)
@ -123,13 +127,16 @@ def test_audit_alert_group_escalation_skips_further_validation_if_the_escalation
alert_group, _, _, _ = escalation_snapshot_test_setup
alert_group.escalation_snapshot.escalation_policies_snapshots = []
alert_group.raw_escalation_snapshot = {"escalation_policies_snapshots": []}
alert_group.save()
audit_alert_group_escalation(alert_group)
alert_group.escalation_snapshot.escalation_policies_snapshots = None
alert_group.raw_escalation_snapshot["escalation_policies_snapshots"] = None
alert_group.save()
audit_alert_group_escalation(alert_group)
@patch("apps.alerts.escalation_snapshot.snapshot_classes.escalation_snapshot.EscalationSnapshot.next_step_eta_is_valid")
@patch("apps.alerts.escalation_snapshot.escalation_snapshot_mixin.EscalationSnapshotMixin.next_step_eta_is_valid")
@pytest.mark.django_db
@pytest.mark.parametrize(
"next_step_eta_is_valid_return_value,raises_exception",
@ -158,18 +165,18 @@ def test_audit_alert_group_escalation_next_step_eta_validation(
@patch(
"apps.alerts.escalation_snapshot.snapshot_classes.escalation_snapshot.EscalationSnapshot.executed_escalation_policy_snapshots",
"apps.alerts.escalation_snapshot.escalation_snapshot_mixin.EscalationSnapshotMixin.last_active_escalation_policy_order",
new_callable=PropertyMock,
)
@pytest.mark.django_db
def test_audit_alert_group_escalation_no_executed_escalation_policy_snapshots(
mock_executed_escalation_policy_snapshots, escalation_snapshot_test_setup
mock_last_active_escalation_policy_order, escalation_snapshot_test_setup
):
alert_group, _, _, _ = escalation_snapshot_test_setup
mock_executed_escalation_policy_snapshots.return_value = []
mock_last_active_escalation_policy_order.return_value = None
audit_alert_group_escalation(alert_group)
mock_executed_escalation_policy_snapshots.assert_called_once_with()
mock_last_active_escalation_policy_order.assert_called_once_with()
# # see TODO: comment in engine/apps/alerts/tasks/check_escalation_finished.py
@ -385,3 +392,125 @@ def test_check_escalation_finished_task_calls_audit_alert_group_escalation_for_e
mocked_audit_alert_group_escalation.assert_any_call(alert_group3)
mocked_send_alert_group_escalation_auditor_task_heartbeat.assert_not_called()
@patch("apps.alerts.tasks.check_escalation_finished.send_alert_group_escalation_auditor_task_heartbeat")
@pytest.mark.django_db
def test_check_escalation_finished_task_calls_audit_alert_group_personal_notifications(
    mocked_send_alert_group_escalation_auditor_task_heartbeat,
    make_organization_and_user,
    make_user_notification_policy,
    make_escalation_chain,
    make_escalation_policy,
    make_channel_filter,
    make_alert_receive_channel,
    make_alert_group_that_started_at_specific_date,
    make_user_notification_policy_log_record,
    caplog,
):
    """check_personal_notifications_task flags only NOTIFY-step notifications
    triggered more than five minutes ago that never completed.

    Scenarios:
    - alert_group1: wait step + notify triggered then succeeded -> passes
    - alert_group2: notify triggered then failed -> passes (failure counts as completed)
    - alert_group3: notify triggered > 5 mins ago, never completed -> flagged (1)
    - alert_group4: notify triggered < 5 mins ago, never completed -> passes
      (too recent to count as uncompleted)

    Fix: the alert_group4 record was previously attached to alert_group3,
    leaving group 4's "too recent" scenario unexercised.
    """
    organization, user = make_organization_and_user()
    user_notification_policy = make_user_notification_policy(
        user=user,
        step=UserNotificationPolicy.Step.NOTIFY,
        notify_by=UserNotificationPolicy.NotificationChannel.SLACK,
    )
    alert_receive_channel = make_alert_receive_channel(organization)
    escalation_chain = make_escalation_chain(organization)
    channel_filter = make_channel_filter(alert_receive_channel, escalation_chain=escalation_chain)
    notify_to_multiple_users_step = make_escalation_policy(
        escalation_chain=channel_filter.escalation_chain,
        escalation_policy_step=EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
    )
    notify_to_multiple_users_step.notify_to_users_queue.set([user])

    alert_group1 = make_alert_group_that_started_at_specific_date(alert_receive_channel, channel_filter=channel_filter)
    alert_group2 = make_alert_group_that_started_at_specific_date(alert_receive_channel, channel_filter=channel_filter)
    alert_group3 = make_alert_group_that_started_at_specific_date(alert_receive_channel, channel_filter=channel_filter)
    alert_group4 = make_alert_group_that_started_at_specific_date(alert_receive_channel, channel_filter=channel_filter)
    alert_groups = [alert_group1, alert_group2, alert_group3, alert_group4]

    # Give every group a snapshot with one "executed" escalation policy so the
    # escalation-side audit does not short-circuit before the notification check.
    for alert_group in alert_groups:
        alert_group.raw_escalation_snapshot = alert_group.build_raw_escalation_snapshot()
        alert_group.raw_escalation_snapshot["last_active_escalation_policy_order"] = 1
        alert_group.save()

    now = timezone.now()

    # alert_group1: wait, notify user, notification successful
    make_user_notification_policy_log_record(
        author=user,
        alert_group=alert_group1,
        notification_policy=user_notification_policy,
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
        notification_step=UserNotificationPolicy.Step.WAIT,
    )
    make_user_notification_policy_log_record(
        author=user,
        alert_group=alert_group1,
        notification_policy=user_notification_policy,
        notification_step=UserNotificationPolicy.Step.NOTIFY,
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
    )
    make_user_notification_policy_log_record(
        author=user,
        alert_group=alert_group1,
        notification_policy=user_notification_policy,
        notification_step=UserNotificationPolicy.Step.NOTIFY,
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_SUCCESS,
    )
    # records created > 5 mins ago
    alert_group1.personal_log_records.update(created_at=now - timezone.timedelta(minutes=7))

    # alert_group2: notify user, notification failed
    make_user_notification_policy_log_record(
        author=user,
        alert_group=alert_group2,
        notification_policy=user_notification_policy,
        notification_step=UserNotificationPolicy.Step.NOTIFY,
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
    )
    make_user_notification_policy_log_record(
        author=user,
        alert_group=alert_group2,
        notification_policy=user_notification_policy,
        notification_step=UserNotificationPolicy.Step.NOTIFY,
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED,
    )
    # records created > 5 mins ago
    alert_group2.personal_log_records.update(created_at=now - timezone.timedelta(minutes=7))

    # alert_group3: notify user, missing completion
    make_user_notification_policy_log_record(
        author=user,
        alert_group=alert_group3,
        notification_policy=user_notification_policy,
        notification_step=UserNotificationPolicy.Step.NOTIFY,
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
    )
    # record created > 5 mins ago
    alert_group3.personal_log_records.update(created_at=now - timezone.timedelta(minutes=7))

    # alert_group4: notify user created < 5 mins ago, missing completion
    # (too recent to count as uncompleted)
    make_user_notification_policy_log_record(
        author=user,
        alert_group=alert_group4,
        notification_policy=user_notification_policy,
        notification_step=UserNotificationPolicy.Step.NOTIFY,
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
    )
    # record created < 5 mins ago
    alert_group4.personal_log_records.update(created_at=now - timezone.timedelta(minutes=2))

    # trigger task
    with patch("apps.alerts.tasks.check_escalation_finished.check_personal_notifications_task") as mock_check_notif:
        check_escalation_finished_task()

    for alert_group in alert_groups:
        mock_check_notif.apply_async.assert_any_call((alert_group.id,))
        check_personal_notifications_task(alert_group.id)
        if alert_group == alert_group3:
            assert f"Alert group {alert_group3.id} has (1) uncompleted personal notifications" in caplog.text
        else:
            assert f"Alert group {alert_group.id} personal notifications check passed" in caplog.text

    mocked_send_alert_group_escalation_auditor_task_heartbeat.assert_called()

View file

@ -196,8 +196,10 @@ def test_next_step_eta_is_valid(escalation_snapshot_test_setup, next_step_eta, e
escalation_snapshot = alert_group.escalation_snapshot
escalation_snapshot.next_step_eta = next_step_eta
escalation_snapshot.save_to_alert_group()
alert_group.refresh_from_db()
assert escalation_snapshot.next_step_eta_is_valid() is expected
assert alert_group.next_step_eta_is_valid() is expected
@pytest.mark.django_db

View file

@ -2,10 +2,12 @@ from unittest.mock import patch
import pytest
from apps.alerts.models import AlertGroup
from apps.alerts.tasks.notify_user import notify_user_task, perform_notification
from apps.api.permissions import LegacyAccessControlRole
from apps.base.models.user_notification_policy import UserNotificationPolicy
from apps.base.models.user_notification_policy_log_record import UserNotificationPolicyLogRecord
from apps.slack.models import SlackMessage
NOTIFICATION_UNAUTHORIZED_MSG = "notification is not allowed for user"
@ -178,3 +180,108 @@ def test_notify_user_error_if_viewer(
assert error_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED
assert error_log_record.reason == NOTIFICATION_UNAUTHORIZED_MSG
assert error_log_record.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_FORBIDDEN
@pytest.mark.django_db
@pytest.mark.parametrize(
    "reason_to_skip_escalation,error_code",
    [
        (AlertGroup.RATE_LIMITED, UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_RATELIMIT),
        (AlertGroup.CHANNEL_ARCHIVED, UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_CHANNEL_IS_ARCHIVED),
        (AlertGroup.ACCOUNT_INACTIVE, UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_TOKEN_ERROR),
        (AlertGroup.RESTRICTED_ACTION, UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK),
        (AlertGroup.NO_REASON, None),
    ],
)
def test_perform_notification_reason_to_skip_escalation_in_slack(
    reason_to_skip_escalation,
    error_code,
    make_organization,
    make_slack_team_identity,
    make_user,
    make_user_notification_policy,
    make_alert_receive_channel,
    make_alert_group,
    make_user_notification_policy_log_record,
    make_slack_message,
):
    """When an alert group's escalation is being skipped in Slack,
    perform_notification must not send a Slack notification and must record a
    failure log with the error code matching the skip reason; with NO_REASON
    the Slack notification is sent and no failure log is written.
    """
    organization = make_organization()
    slack_team_identity = make_slack_team_identity()
    organization.slack_team_identity = slack_team_identity
    organization.save()
    user = make_user(organization=organization)
    user_notification_policy = make_user_notification_policy(
        user=user,
        step=UserNotificationPolicy.Step.NOTIFY,
        notify_by=UserNotificationPolicy.NotificationChannel.SLACK,
    )
    alert_receive_channel = make_alert_receive_channel(organization=organization)
    alert_group = make_alert_group(alert_receive_channel=alert_receive_channel)
    alert_group.reason_to_skip_escalation = reason_to_skip_escalation
    alert_group.save()
    log_record = make_user_notification_policy_log_record(
        author=user,
        alert_group=alert_group,
        notification_policy=user_notification_policy,
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
    )
    # Only the happy path (no error code) needs an existing Slack message to notify on.
    if not error_code:
        make_slack_message(alert_group=alert_group, channel_id="test_channel_id", slack_id="test_slack_id")

    with patch.object(SlackMessage, "send_slack_notification") as mocked_send_slack_notification:
        perform_notification(log_record.pk)

    last_log_record = UserNotificationPolicyLogRecord.objects.last()

    if error_code:
        # Skipped: notification suppressed, failure recorded with the mapped error code.
        log_reason = f"Skipped escalation in Slack, reason: '{alert_group.get_reason_to_skip_escalation_display()}'"
        mocked_send_slack_notification.assert_not_called()
        assert last_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED
        assert last_log_record.reason == log_reason
        assert last_log_record.notification_error_code == error_code
    else:
        # Not skipped: notification sent, no failure log.
        mocked_send_slack_notification.assert_called()
        assert last_log_record.type != UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED
@pytest.mark.django_db
def test_perform_notification_slack_prevent_posting(
    make_organization,
    make_slack_team_identity,
    make_user,
    make_user_notification_policy,
    make_alert_receive_channel,
    make_alert_group,
    make_user_notification_policy_log_record,
    make_slack_message,
):
    """When the triggering log record has slack_prevent_posting set,
    perform_notification must not send a Slack notification and must record a
    failure log with ERROR_NOTIFICATION_POSTING_TO_SLACK_IS_DISABLED.
    """
    organization = make_organization()
    slack_team_identity = make_slack_team_identity()
    organization.slack_team_identity = slack_team_identity
    organization.save()
    user = make_user(organization=organization)
    user_notification_policy = make_user_notification_policy(
        user=user,
        step=UserNotificationPolicy.Step.NOTIFY,
        notify_by=UserNotificationPolicy.NotificationChannel.SLACK,
    )
    alert_receive_channel = make_alert_receive_channel(organization=organization)
    alert_group = make_alert_group(alert_receive_channel=alert_receive_channel)
    # slack_prevent_posting is the flag under test.
    log_record = make_user_notification_policy_log_record(
        author=user,
        alert_group=alert_group,
        notification_policy=user_notification_policy,
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
        slack_prevent_posting=True,
    )
    # A Slack message exists, so only the prevent-posting flag blocks delivery.
    make_slack_message(alert_group=alert_group, channel_id="test_channel_id", slack_id="test_slack_id")

    with patch.object(SlackMessage, "send_slack_notification") as mocked_send_slack_notification:
        perform_notification(log_record.pk)

    mocked_send_slack_notification.assert_not_called()
    last_log_record = UserNotificationPolicyLogRecord.objects.last()
    assert last_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED
    assert last_log_record.reason == "Prevented from posting in Slack"
    assert (
        last_log_record.notification_error_code
        == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_POSTING_TO_SLACK_IS_DISABLED
    )

View file

@ -34,14 +34,19 @@ class AlertGroupTableColumnsOrganizationSerializer(serializers.Serializer):
"""
Validate that at least one column is selected as visible and that all default columns are in the list.
"""
columns = data["visible"] + data["hidden"]
request_columns_ids = [column["id"] for column in columns]
request_columns_by_type = {}
for column in data["visible"] + data["hidden"]:
request_columns_by_type.setdefault(column["type"], []).append(column["id"])
if len(data["visible"]) == 0:
raise ValidationError("At least one column should be selected as visible")
elif not set(request_columns_ids) >= set(AlertGroupTableDefaultColumnChoices.values):
elif not (
set(request_columns_by_type[AlertGroupTableColumnTypeChoices.DEFAULT])
== set(AlertGroupTableDefaultColumnChoices.values)
):
raise ValidationError("Default column cannot be removed")
elif len(request_columns_ids) > len(set(request_columns_ids)):
raise ValidationError("Duplicate column")
for columns_ids in request_columns_by_type.values():
if len(columns_ids) > len(set(columns_ids)):
raise ValidationError("Duplicate column")
return data

View file

@ -5,7 +5,11 @@ from rest_framework.test import APIClient
from apps.api.alert_group_table_columns import alert_group_table_user_settings
from apps.api.permissions import LegacyAccessControlRole
from apps.user_management.constants import AlertGroupTableColumnTypeChoices, default_columns
from apps.user_management.constants import (
AlertGroupTableColumnTypeChoices,
AlertGroupTableDefaultColumnChoices,
default_columns,
)
DEFAULT_COLUMNS = default_columns()
@ -41,6 +45,18 @@ def test_get_columns(
columns_settings({"name": "Test", "id": "test", "type": AlertGroupTableColumnTypeChoices.LABEL.value}),
status.HTTP_200_OK,
),
# add label column with the same id as default
(
columns_settings(),
columns_settings({"name": "Status", "id": "status", "type": AlertGroupTableColumnTypeChoices.LABEL.value}),
status.HTTP_200_OK,
),
# add nonexistent default column
(
columns_settings(),
columns_settings({"name": "Hello", "id": "hello", "type": AlertGroupTableColumnTypeChoices.DEFAULT.value}),
status.HTTP_400_BAD_REQUEST,
),
# remove column
(
columns_settings({"name": "Test", "id": "test", "type": AlertGroupTableColumnTypeChoices.LABEL.value}),
@ -60,7 +76,13 @@ def test_get_columns(
# duplicate id
(
columns_settings(),
columns_settings({"name": "Test", "id": 1, "type": AlertGroupTableColumnTypeChoices.DEFAULT.value}),
columns_settings(
{
"name": "Test",
"id": AlertGroupTableDefaultColumnChoices.STATUS.value,
"type": AlertGroupTableColumnTypeChoices.DEFAULT.value,
}
),
status.HTTP_400_BAD_REQUEST,
),
# remove default column

View file

@ -701,7 +701,10 @@ def test_create_invalid_missing_fields(webhook_internal_api_setup, make_user_aut
}
response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json()["http_method"][0] == "This field must be one of ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS']."
assert (
response.json()["http_method"][0]
== "This field must be one of ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH']."
)
data = {
"name": "test webhook 3",
@ -711,7 +714,10 @@ def test_create_invalid_missing_fields(webhook_internal_api_setup, make_user_aut
}
response = client.post(url, data, format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json()["http_method"][0] == "This field must be one of ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS']."
assert (
response.json()["http_method"][0]
== "This field must be one of ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH']."
)
data = {"name": "test webhook 3", "url": TEST_URL, "trigger_type": 2000000, "http_method": "POST"}
response = client.post(url, data, format="json", **make_user_auth_headers(user, token))

View file

@ -1,7 +1,7 @@
import logging
from typing import Optional, TypedDict
from anymail.exceptions import AnymailWebhookValidationFailure
from anymail.exceptions import AnymailInvalidAddress, AnymailWebhookValidationFailure
from anymail.inbound import AnymailInboundMessage
from anymail.signals import AnymailInboundEvent
from anymail.webhooks import amazon_ses, mailgun, mailjet, mandrill, postal, postmark, sendgrid, sparkpost
@ -140,6 +140,18 @@ class InboundEmailWebhookView(AlertChannelDefiningMixin, APIView):
subject = subject.strip()
message = email.text or ""
message = message.strip()
sender = email.from_email.addr_spec
sender = self.get_sender_from_email_message(email)
return {"subject": subject, "message": message, "sender": sender}
def get_sender_from_email_message(self, email: AnymailInboundMessage) -> str:
    """
    Extract the sender address from an inbound email message.

    Returns the parsed addr-spec when the "From" header is parseable; otherwise
    falls back to the raw "From" header value(s), joined with ", " when several
    are present.
    """
    try:
        return email.from_email.addr_spec
    except AnymailInvalidAddress as e:
        # wasn't able to parse email address from message, return raw value from "From" header
        logger.warning(
            f"get_sender_from_email_message: issue during parsing sender from email message, getting raw value "
            f"instead. Exception: {e}"
        )
        return ", ".join(email.get_all("From"))

View file

@ -1,10 +1,13 @@
import json
import pytest
from anymail.inbound import AnymailInboundMessage
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from apps.email.inbound import InboundEmailWebhookView
@pytest.mark.django_db
def test_amazon_ses_provider_load(settings, make_organization_and_user_with_token, make_alert_receive_channel):
@ -52,3 +55,18 @@ def test_amazon_ses_provider_load(settings, make_organization_and_user_with_toke
)
assert response.status_code == status.HTTP_200_OK
@pytest.mark.parametrize(
    "sender_value,expected_result",
    [
        ("'Alex Smith' <test@example.com>", "test@example.com"),
        ("'Alex Smith' via [TEST] mail <test@example.com>", "'Alex Smith' via [TEST] mail <test@example.com>"),
    ],
)
def test_get_sender_from_email_message(sender_value, expected_result):
    """A parseable "From" header yields its addr-spec; an unparseable one falls back to the raw header value."""
    message = AnymailInboundMessage()
    message["From"] = sender_value

    sender = InboundEmailWebhookView().get_sender_from_email_message(message)

    assert sender == expected_result

View file

@ -169,6 +169,6 @@ def notify_about_integration_ratelimit_in_slack(organization_id, text, **kwargs)
if slack_team_identity is not None:
try:
sc = SlackClient(slack_team_identity, enable_ratelimit_retry=True)
sc.chat_postMessage(channel=organization.general_log_channel_id, text=text, team=slack_team_identity)
sc.chat_postMessage(channel=organization.general_log_channel_id, text=text)
except SlackAPIError as e:
logger.warning(f"Slack exception {e} while sending message for organization {organization_id}")

View file

@ -10,7 +10,6 @@ from apps.slack.models import SlackUserGroup
from apps.user_management.models import User
from apps.webhooks.models import Webhook
from common.api_helpers.custom_fields import (
CustomTimeField,
OrganizationFilteredPrimaryKeyRelatedField,
UsersFilteredByOrganizationField,
)
@ -78,8 +77,14 @@ class EscalationPolicySerializer(EagerLoadingMixin, OrderedModelSerializer):
source="custom_webhook",
)
important = serializers.BooleanField(required=False)
notify_if_time_from = CustomTimeField(required=False, source="from_time")
notify_if_time_to = CustomTimeField(required=False, source="to_time")
TIME_FORMAT = "%H:%M:%SZ"
notify_if_time_from = serializers.TimeField(
required=False, source="from_time", format=TIME_FORMAT, input_formats=[TIME_FORMAT]
)
notify_if_time_to = serializers.TimeField(
required=False, source="to_time", format=TIME_FORMAT, input_formats=[TIME_FORMAT]
)
class Meta:
model = EscalationPolicy

View file

@ -418,3 +418,41 @@ def test_update_escalation_policy_using_button_to_webhook(
assert response.data == serializer.data
# step is migrated
assert escalation_policy.step == EscalationPolicy.STEP_TRIGGER_CUSTOM_WEBHOOK
@pytest.mark.django_db
@pytest.mark.parametrize(
    "value,expected_status",
    [
        (5, status.HTTP_400_BAD_REQUEST),
        ("5", status.HTTP_400_BAD_REQUEST),
        ("5:00", status.HTTP_400_BAD_REQUEST),
        ("05:00:00", status.HTTP_400_BAD_REQUEST),
        ("05:00:00Z", status.HTTP_200_OK),
    ],
)
def test_update_escalation_policy_from_and_to_time(
    make_organization_and_user_with_token,
    make_escalation_chain,
    make_escalation_policy,
    value,
    expected_status,
):
    """
    PUT on the public escalation policy endpoint must only accept ``notify_if_time_from``
    and ``notify_if_time_to`` values in the exact "hh:mm:ssZ" format; any other shape
    is rejected with 400 and the standard DRF TimeField error message.
    """
    organization, _, token = make_organization_and_user_with_token()
    escalation_chain = make_escalation_chain(organization)
    # STEP_NOTIFY_IF_TIME is the step type that uses from_time / to_time.
    escalation_policy = make_escalation_policy(escalation_chain, EscalationPolicy.STEP_NOTIFY_IF_TIME)
    client = APIClient()
    url = reverse("api-public:escalation_policies-detail", kwargs={"pk": escalation_policy.public_primary_key})
    # Both time fields share the same validation, so exercise each in turn.
    for field in ["notify_if_time_from", "notify_if_time_to"]:
        response = client.put(url, data={field: value}, format="json", HTTP_AUTHORIZATION=token)
        assert response.status_code == expected_status
        if expected_status == status.HTTP_200_OK:
            # On success the response must round-trip through the serializer unchanged.
            escalation_policy = EscalationPolicy.objects.get(public_primary_key=response.data["id"])
            serializer = EscalationPolicySerializer(escalation_policy)
            assert response.data == serializer.data
        else:
            assert response.json()[field][0] == "Time has wrong format. Use one of these formats instead: hh:mm:ssZ."

View file

@ -284,6 +284,9 @@ def test_get_team_select_blocks(
input_id_prefix = "nmxcnvmnxv"
def _contstruct_team_option(team):
return {"text": {"emoji": True, "text": team.name, "type": "plain_text"}, "value": str(team.pk)}
# no team selected - no team direct paging integrations available
organization, _, _, slack_user_identity = make_organization_and_user_with_slack_identities()
blocks = _get_team_select_blocks(slack_user_identity, organization, False, None, input_id_prefix)
@ -309,11 +312,9 @@ def test_get_team_select_blocks(
assert len(blocks) == 2
input_block, context_block = blocks
team_option = {"text": {"emoji": True, "text": team.name, "type": "plain_text"}, "value": str(team.pk)}
assert input_block["type"] == "input"
assert len(input_block["element"]["options"]) == 1
assert input_block["element"]["options"] == [team_option]
assert input_block["element"]["options"] == [_contstruct_team_option(team)]
assert context_block["elements"][0]["text"] == info_msg
# team selected
@ -337,9 +338,6 @@ def test_get_team_select_blocks(
assert len(blocks) == 2
input_block, context_block = blocks
def _contstruct_team_option(team):
return {"text": {"emoji": True, "text": team.name, "type": "plain_text"}, "value": str(team.pk)}
team1_option = _contstruct_team_option(team1)
team2_option = _contstruct_team_option(team2)
@ -355,3 +353,23 @@ def test_get_team_select_blocks(
context_block["elements"][0]["text"]
== f"Integration <{team2_direct_paging_arc.web_link}|{team2_direct_paging_arc.verbal_name}> will be used for notification."
)
# team's direct paging integration has two routes associated with it
# the team should only be displayed once
organization, _, _, slack_user_identity = make_organization_and_user_with_slack_identities()
team = make_team(organization)
arc = make_alert_receive_channel(organization, team=team, integration=AlertReceiveChannel.INTEGRATION_DIRECT_PAGING)
escalation_chain = make_escalation_chain(organization)
make_channel_filter(arc, is_default=True, escalation_chain=escalation_chain)
make_channel_filter(arc, escalation_chain=escalation_chain)
blocks = _get_team_select_blocks(slack_user_identity, organization, False, None, input_id_prefix)
assert len(blocks) == 2
input_block, context_block = blocks
assert input_block["type"] == "input"
assert len(input_block["element"]["options"]) == 1
assert input_block["element"]["options"] == [_contstruct_team_option(team)]
assert context_block["elements"][0]["text"] == info_msg

View file

@ -323,16 +323,20 @@ class Organization(MaintainableObject):
"""
from apps.alerts.models import AlertReceiveChannel
return self.alert_receive_channels.annotate(
num_channel_filters=Count("channel_filters"),
# used to determine if the organization has telegram configured
num_org_telegram_channels=Count("organization__telegram_channel"),
).filter(
Q(num_channel_filters__gt=1)
| (Q(organization__slack_team_identity__isnull=False) | Q(num_org_telegram_channels__gt=0))
| Q(channel_filters__is_default=True, channel_filters__escalation_chain__isnull=False)
| Q(channel_filters__is_default=True, channel_filters__notification_backends__isnull=False),
integration=AlertReceiveChannel.INTEGRATION_DIRECT_PAGING,
return (
self.alert_receive_channels.annotate(
num_channel_filters=Count("channel_filters"),
# used to determine if the organization has telegram configured
num_org_telegram_channels=Count("organization__telegram_channel"),
)
.filter(
Q(num_channel_filters__gt=1)
| (Q(organization__slack_team_identity__isnull=False) | Q(num_org_telegram_channels__gt=0))
| Q(channel_filters__is_default=True, channel_filters__escalation_chain__isnull=False)
| Q(channel_filters__is_default=True, channel_filters__notification_backends__isnull=False),
integration=AlertReceiveChannel.INTEGRATION_DIRECT_PAGING,
)
.distinct()
)
@property

View file

@ -217,6 +217,7 @@ def test_get_notifiable_direct_paging_integrations(
assert arc in notifiable_direct_paging_integrations
else:
assert arc not in notifiable_direct_paging_integrations
return notifiable_direct_paging_integrations
# integration has no default channel filter
org, arc = _make_org_and_arc()
@ -269,3 +270,11 @@ def test_get_notifiable_direct_paging_integrations(
escalation_chain = make_escalation_chain(org)
make_channel_filter(arc, is_default=True, notify_in_slack=False, escalation_chain=escalation_chain)
_assert(org, arc)
# integration has more than one channel filter associated with it, nevertheless the integration should only
# be returned once
org, arc = _make_org_and_arc()
make_channel_filter(arc, is_default=True)
make_channel_filter(arc, is_default=False)
notifiable_direct_paging_integrations = _assert(org, arc)
assert notifiable_direct_paging_integrations.count() == 1

View file

@ -32,7 +32,7 @@ if typing.TYPE_CHECKING:
from apps.alerts.models import EscalationPolicy
WEBHOOK_FIELD_PLACEHOLDER = "****************"
PUBLIC_WEBHOOK_HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "OPTIONS"]
PUBLIC_WEBHOOK_HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"]
logger = get_task_logger(__name__)
logger.setLevel(logging.DEBUG)
@ -186,7 +186,7 @@ class Webhook(models.Model):
if self.authorization_header:
request_kwargs["headers"]["Authorization"] = self.authorization_header
if self.http_method in ["POST", "PUT"]:
if self.http_method in ["POST", "PUT", "PATCH"]:
if self.forward_all:
request_kwargs["json"] = event_data
if self.is_legacy:
@ -255,6 +255,8 @@ class Webhook(models.Model):
r = requests.delete(url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs)
elif self.http_method == "OPTIONS":
r = requests.options(url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs)
elif self.http_method == "PATCH":
r = requests.patch(url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs)
else:
raise ValueError(f"Unsupported http method: {self.http_method}")
return r

View file

@ -231,7 +231,7 @@ def test_make_request(make_organization, make_custom_webhook):
organization = make_organization()
with patch("apps.webhooks.models.webhook.requests") as mock_requests:
for method in ("GET", "POST", "PUT", "DELETE", "OPTIONS"):
for method in ("GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"):
webhook = make_custom_webhook(organization=organization, http_method=method)
webhook.make_request("url", {"foo": "bar"})
expected_call = getattr(mock_requests, method.lower())

View file

@ -1,5 +1,3 @@
import time
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import fields, serializers
from rest_framework.exceptions import ValidationError
@ -102,25 +100,6 @@ class UsersFilteredByOrganizationField(serializers.Field):
return queryset.filter(organization=request.user.organization, public_primary_key__in=data).distinct()
class CustomTimeField(fields.TimeField):
def to_representation(self, value):
result = super().to_representation(value)
if result[-1] != "Z":
result += "Z"
return result
def to_internal_value(self, data):
TIME_FORMAT_LEN = len("00:00:00Z")
if len(data) == TIME_FORMAT_LEN:
try:
time.strptime(data, "%H:%M:%SZ")
except ValueError:
raise BadRequest(detail="Invalid time format, should be '00:00:00Z'")
else:
raise BadRequest(detail="Invalid time format, should be '00:00:00Z'")
return data
class RouteIdField(fields.CharField):
def to_internal_value(self, data):
try:

View file

@ -5,7 +5,10 @@ whitenoise==5.3.0
twilio~=6.37.0
phonenumbers==8.10.0
celery[amqp,redis]==5.3.1
redis==5.0.1
# NOTE: temporarily installing a forked version of redis-py which adds some more debug logging
# in an effort to fix https://github.com/grafana/oncall-private/issues/2406
# revert this change once done debugging
git+https://github.com/grafana/redis-py@c0f167c
humanize==0.5.1
uwsgi==2.0.21
django-cors-headers==3.7.0

View file

@ -848,3 +848,5 @@ ZVONOK_POSTBACK_USER_CHOICE = os.getenv("ZVONOK_POSTBACK_USER_CHOICE", None)
ZVONOK_POSTBACK_USER_CHOICE_ACK = os.getenv("ZVONOK_POSTBACK_USER_CHOICE_ACK", None)
DETACHED_INTEGRATIONS_SERVER = getenv_boolean("DETACHED_INTEGRATIONS_SERVER", default=False)
ACKNOWLEDGE_REMINDER_TASK_EXPIRY_DAYS = os.environ.get("ACKNOWLEDGE_REMINDER_TASK_EXPIRY_DAYS", default=14)

View file

@ -122,6 +122,7 @@ CELERY_TASK_ROUTES = {
"apps.alerts.tasks.alert_group_web_title_cache.update_web_title_cache_for_alert_receive_channel": {"queue": "long"},
"apps.alerts.tasks.alert_group_web_title_cache.update_web_title_cache": {"queue": "long"},
"apps.alerts.tasks.check_escalation_finished.check_escalation_finished_task": {"queue": "long"},
"apps.alerts.tasks.check_escalation_finished.check_personal_notifications_task": {"queue": "long"},
"apps.grafana_plugin.tasks.sync.cleanup_organization_async": {"queue": "long"},
"apps.grafana_plugin.tasks.sync.start_cleanup_deleted_organizations": {"queue": "long"},
"apps.grafana_plugin.tasks.sync.start_sync_organizations": {"queue": "long"},

View file

@ -1,5 +1,4 @@
node_modules
frontend_enterprise
.DS_Store
test-results
playwright-report

View file

@ -16,7 +16,6 @@ grafana-plugin.yml
frontend_enterprise
# playwright
/test-results/
/playwright-report/
/playwright/.cache/
/e2e-tests/storageState.json

View file

@ -1,5 +1,3 @@
BASE_URL=http://localhost:30002/grafana
ONCALL_API_URL=http://oncall-dev-engine-external:8080/
GRAFANA_VIEWER_USERNAME=viewer
GRAFANA_VIEWER_PASSWORD=viewer
GRAFANA_EDITOR_USERNAME=editor

View file

@ -6,9 +6,6 @@ import { createIntegrationAndSendDemoAlert } from '../utils/integrations';
import { createOnCallSchedule } from '../utils/schedule';
test('we can create an oncall schedule + receive an alert', async ({ adminRolePage }) => {
// this test does a lot of stuff, lets give it adequate time to do its thing
test.slow();
const { page, userName } = adminRolePage;
const escalationChainName = generateRandomValue();
const integrationName = generateRandomValue();

View file

@ -1,6 +1,6 @@
import {expect, test} from "../fixtures";
import {createEscalationChain, EscalationStep, selectEscalationStepValue} from "../utils/escalationChain";
import {generateRandomValue} from "../utils/forms";
import { Locator, expect, test } from '../fixtures';
import { createEscalationChain, EscalationStep, selectEscalationStepValue } from '../utils/escalationChain';
import { generateRandomValue } from '../utils/forms';
test('escalation policy does not go back to "Default" after adding users to notify', async ({ adminRolePage }) => {
const { page, userName } = adminRolePage;
@ -13,7 +13,47 @@ test('escalation policy does not go back to "Default" after adding users to noti
// reload and check if important is still selected
await page.reload();
await expect(page.getByText('Important')).toBeVisible();
});
// TODO: unskip when https://github.com/grafana/oncall/issues/3585 is patched
test.skip('from_time and to_time for "Continue escalation if current UTC time is in range" escalation step type can be properly updated', async ({
  adminRolePage,
}) => {
  const FROM_TIME = '10:31';
  const TO_TIME = '10:32';

  const { page } = adminRolePage;
  const escalationChainName = generateRandomValue();

  // create escalation step w/ Continue escalation if current UTC time is in policy step
  await createEscalationChain(page, escalationChainName, EscalationStep.ContinueEscalationIfCurrentUTCTimeIsIn);

  const _getFromTimeInput = () => page.locator('[data-testid="time-range-from"] >> input');
  const _getToTimeInput = () => page.locator('[data-testid="time-range-to"] >> input');

  const clickAndInputValue = async (locator: Locator, value: string) => {
    // the first click opens up dropdown which contains the time selector scrollable lists
    await locator.click();

    // the second click focuses on the input where we can actually type the time instead, much easier
    const actualInput = page.locator('input[class="rc-time-picker-panel-input"]');
    await actualInput.click();
    await actualInput.selectText();
    await actualInput.fill(value);

    // click anywhere to close the dropdown
    await page.click('body');
  };

  // update from and to time values
  await clickAndInputValue(_getFromTimeInput(), FROM_TIME);
  await clickAndInputValue(_getToTimeInput(), TO_TIME);

  // reload and check that these values have been persisted
  await page.reload();
  await page.waitForLoadState('networkidle');

  // NOTE(review): the 'Important' check looks copied from the previous test — confirm it applies here
  expect(await page.locator('text=Important').isVisible()).toBe(true);
  // bug fix: use inputValue() — textContent() of an <input> element is always empty,
  // so the previous assertions could never actually verify the persisted times
  expect(await _getFromTimeInput().inputValue()).toBe(FROM_TIME);
  // bug fix: the to-time input must be compared against TO_TIME (was mistakenly FROM_TIME,
  // which masked persistence failures of to_time)
  expect(await _getToTimeInput().inputValue()).toBe(TO_TIME);
});

View file

@ -1,6 +1,8 @@
import { OrgRole } from '@grafana/data';
import { test as setup, chromium, expect, Page, BrowserContext, FullConfig, APIRequestContext } from '@playwright/test';
import { getOnCallApiUrl } from 'utils/consts';
import { VIEWER_USER_STORAGE_STATE, EDITOR_USER_STORAGE_STATE, ADMIN_USER_STORAGE_STATE } from '../playwright.config';
import GrafanaAPIClient from './utils/clients/grafana';
@ -13,7 +15,6 @@ import {
GRAFANA_VIEWER_USERNAME,
IS_CLOUD,
IS_OPEN_SOURCE,
ONCALL_API_URL,
} from './utils/constants';
import { clickButton, getInputByName } from './utils/forms';
import { goToGrafanaPage } from './utils/navigation';
@ -59,17 +60,26 @@ const configureOnCallPlugin = async (page: Page): Promise<void> => {
* go to the oncall plugin configuration page and wait for the page to be loaded
*/
await goToGrafanaPage(page, '/plugins/grafana-oncall-app');
await page.waitForSelector('text=Configure Grafana OnCall');
await page.waitForTimeout(2000);
/**
* we may need to fill in the OnCall API URL if it is not set in the process.env
* of the frontend build
*/
const onCallApiUrlInput = getInputByName(page, 'onCallApiUrl');
const pluginIsAutoConfigured = (await onCallApiUrlInput.count()) === 0;
// if plugin is configured, go to OnCall
const isConfigured = (await page.getByText('Connected to OnCall').count()) >= 1;
if (isConfigured) {
await page.getByRole('link', { name: 'Open Grafana OnCall' }).click();
return;
}
if (!pluginIsAutoConfigured) {
await onCallApiUrlInput.fill(ONCALL_API_URL);
// otherwise we may need to reconfigure the plugin
const needToReconfigure = (await page.getByText('try removing your plugin configuration').count()) >= 1;
if (needToReconfigure) {
await clickButton({ page, buttonText: 'Remove current configuration' });
await clickButton({ page, buttonText: /^Remove$/ });
}
await page.waitForTimeout(2000);
const needToEnterOnCallApiUrl = await page.getByText(/Connected to OnCall/).isHidden();
if (needToEnterOnCallApiUrl) {
await getInputByName(page, 'onCallApiUrl').fill(getOnCallApiUrl() || 'http://oncall-dev-engine:8080');
await clickButton({ page, buttonText: 'Connect' });
}
@ -88,13 +98,6 @@ const configureOnCallPlugin = async (page: Page): Promise<void> => {
* https://github.com/grafana/incident/blob/main/plugin/e2e/global-setup.ts
*/
setup('Configure Grafana OnCall plugin', async ({ request }, { config }) => {
/**
* Unconditionally marks the setup as "slow", giving it triple the default timeout.
* This is mostly useful for the rare case for Cloud Grafana instances where the instance may be down/unavailable
* and we need to poll it until it is available
*/
setup.slow();
if (IS_CLOUD) {
await grafanaApiClient.pollInstanceUntilItIsHealthy(request);
}

View file

@ -1,6 +1,7 @@
import { test, Page, expect } from '../fixtures';
import { generateRandomValue, selectDropdownValue } from '../utils/forms';
import { createIntegration } from '../utils/integrations';
import { createIntegration, searchIntegrationAndAssertItsPresence } from '../utils/integrations';
import { goToOnCallPage } from '../utils/navigation';
const HEARTBEAT_SETTINGS_FORM_TEST_ID = 'heartbeat-settings-form';
@ -12,7 +13,8 @@ test.describe("updating an integration's heartbeat interval works", async () =>
};
test('change heartbeat interval', async ({ adminRolePage: { page } }) => {
await createIntegration({ page, integrationName: generateRandomValue() });
const integrationName = generateRandomValue();
await createIntegration({ page, integrationName });
await _openHeartbeatSettingsForm(page);
@ -42,7 +44,8 @@ test.describe("updating an integration's heartbeat interval works", async () =>
});
test('send heartbeat', async ({ adminRolePage: { page } }) => {
await createIntegration({ page, integrationName: generateRandomValue() });
const integrationName = generateRandomValue();
await createIntegration({ page, integrationName });
await _openHeartbeatSettingsForm(page);
@ -59,6 +62,9 @@ test.describe("updating an integration's heartbeat interval works", async () =>
*/
await page.request.get(endpoint);
await page.reload({ waitUntil: 'networkidle' });
await goToOnCallPage(page, 'integrations');
await searchIntegrationAndAssertItsPresence({ page, integrationName });
await page.getByTestId('heartbeat-badge').waitFor();
});
});

View file

@ -17,7 +17,6 @@ test('Integrations table shows data in Monitoring Systems and Direct Paging tabs
await createIntegration({
page,
integrationSearchText: 'Alertmanager',
shouldGoToIntegrationsPage: false,
integrationName: ALERTMANAGER_INTEGRATION_NAME,
});
await page.waitForTimeout(1000);
@ -32,7 +31,6 @@ test('Integrations table shows data in Monitoring Systems and Direct Paging tabs
await createIntegration({
page,
integrationSearchText: 'Direct paging',
shouldGoToIntegrationsPage: false,
integrationName: DIRECT_PAGING_INTEGRATION_NAME,
});
await page.waitForTimeout(1000);
@ -40,15 +38,13 @@ test('Integrations table shows data in Monitoring Systems and Direct Paging tabs
await page.getByRole('tab', { name: 'Tab Integrations' }).click();
// By default Monitoring Systems tab is opened and newly created integrations are visible except Direct Paging one
await searchIntegrationAndAssertItsPresence({ page, integrationsTable, integrationName: WEBHOOK_INTEGRATION_NAME });
await searchIntegrationAndAssertItsPresence({ page, integrationName: WEBHOOK_INTEGRATION_NAME });
await searchIntegrationAndAssertItsPresence({
page,
integrationsTable,
integrationName: ALERTMANAGER_INTEGRATION_NAME,
});
await searchIntegrationAndAssertItsPresence({
page,
integrationsTable,
integrationName: DIRECT_PAGING_INTEGRATION_NAME,
visibleExpected: false,
});
@ -57,19 +53,16 @@ test('Integrations table shows data in Monitoring Systems and Direct Paging tabs
await page.getByRole('tab', { name: 'Tab Manual Direct Paging' }).click();
await searchIntegrationAndAssertItsPresence({
page,
integrationsTable,
integrationName: WEBHOOK_INTEGRATION_NAME,
visibleExpected: false,
});
await searchIntegrationAndAssertItsPresence({
page,
integrationsTable,
integrationName: ALERTMANAGER_INTEGRATION_NAME,
visibleExpected: false,
});
await searchIntegrationAndAssertItsPresence({
page,
integrationsTable,
integrationName: 'Direct paging',
});
});

View file

@ -103,6 +103,7 @@ test.describe('maintenance mode works', () => {
await createEscalationChain(page, escalationChainName, EscalationStep.NotifyUsers, userName);
await createIntegration({ page, integrationName });
await page.waitForTimeout(1000);
await assignEscalationChainToIntegration(page, escalationChainName);
await enableMaintenanceMode(page, maintenanceModeType);
@ -110,8 +111,6 @@ test.describe('maintenance mode works', () => {
};
test('debug mode', async ({ adminRolePage: { page, userName } }) => {
test.slow();
const { escalationChainName, integrationName } = await createIntegrationAndEscalationChainAndEnableMaintenanceMode(
page,
userName,
@ -128,7 +127,6 @@ test.describe('maintenance mode works', () => {
});
test('"maintenance" mode', async ({ adminRolePage: { page, userName } }) => {
test.slow();
const { integrationName } = await createIntegrationAndEscalationChainAndEnableMaintenanceMode(
page,
userName,

View file

@ -1,5 +1,4 @@
export const BASE_URL = process.env.BASE_URL || 'http://localhost:3000';
export const ONCALL_API_URL = process.env.ONCALL_API_URL || 'http://host.docker.internal:8080';
export const MAILSLURP_API_KEY = process.env.MAILSLURP_API_KEY;
export const GRAFANA_VIEWER_USERNAME = process.env.GRAFANA_VIEWER_USERNAME || 'viewer';

View file

@ -6,9 +6,10 @@ import { goToOnCallPage } from './navigation';
export enum EscalationStep {
NotifyUsers = 'Notify users',
NotifyUsersFromOnCallSchedule = 'Notify users from on-call schedule',
ContinueEscalationIfCurrentUTCTimeIsIn = 'Continue escalation if current UTC time is in range',
}
const escalationStepValuePlaceholder: Record<EscalationStep, string> = {
const escalationStepValuePlaceholder: Partial<Record<EscalationStep, string>> = {
[EscalationStep.NotifyUsers]: 'Select User',
[EscalationStep.NotifyUsersFromOnCallSchedule]: 'Select Schedule',
};

View file

@ -22,7 +22,7 @@ type SelectDropdownValueArgs = {
type ClickButtonArgs = {
page: Page;
buttonText: string;
buttonText: string | RegExp;
// if provided, use this Locator as the root of our search for the button
startingLocator?: Locator;
};

View file

@ -1,4 +1,4 @@
import { Locator, Page, expect } from '@playwright/test';
import { Page, expect } from '@playwright/test';
import { clickButton, generateRandomValue, selectDropdownValue } from './forms';
import { goToOnCallPage } from './navigation';
@ -38,17 +38,24 @@ export const createIntegration = async ({
.click();
// fill in the required inputs
(await page.waitForSelector('input[name="verbal_name"]', { state: 'attached' })).fill(integrationName);
(await page.waitForSelector('textarea[name="description_short"]', { state: 'attached' })).fill(
'Here goes your integration description'
);
await page.getByPlaceholder('Integration Name').fill(integrationName);
await page.getByPlaceholder('Integration Description').fill('Here goes your integration description');
await page.getByTestId('update-integration-button').focus();
await page.getByTestId('update-integration-button').click();
const grafanaUpdateBtn = page.getByTestId('update-integration-button');
await grafanaUpdateBtn.click();
await goToOnCallPage(page, 'integrations');
await searchIntegrationAndAssertItsPresence({ page, integrationName });
await page.getByRole('link', { name: integrationName }).click();
};
export const assignEscalationChainToIntegration = async (page: Page, escalationChainName: string): Promise<void> => {
await page.getByTestId('integration-escalation-chain-not-selected').click();
const notSelected = page.getByTestId('integration-escalation-chain-not-selected');
if (await notSelected.isHidden()) {
await clickButton({ page, buttonText: 'Add route' });
await page.waitForTimeout(500);
}
await notSelected.last().click();
// assign the escalation chain to the integration
await selectDropdownValue({
@ -56,7 +63,7 @@ export const assignEscalationChainToIntegration = async (page: Page, escalationC
selectType: 'grafanaSelect',
placeholderText: 'Select Escalation Chain',
value: escalationChainName,
startingLocator: page.getByTestId('escalation-chain-select'),
startingLocator: page.getByTestId('escalation-chain-select').last(),
});
};
@ -92,11 +99,9 @@ export const filterIntegrationsTableAndGoToDetailPage = async (page: Page, integ
export const searchIntegrationAndAssertItsPresence = async ({
page,
integrationName,
integrationsTable,
visibleExpected = true,
}: {
page: Page;
integrationsTable: Locator;
integrationName: string;
visibleExpected?: boolean;
}) => {
@ -105,6 +110,7 @@ export const searchIntegrationAndAssertItsPresence = async ({
.filter({ hasText: /^Search or filter results\.\.\.$/ })
.nth(1)
.click();
const integrationsTable = page.getByTestId('integrations-table');
await page.keyboard.insertText(integrationName);
await page.keyboard.press('Enter');
await page.waitForTimeout(2000);

View file

@ -1,13 +1,15 @@
import type { Page, Response } from '@playwright/test';
import type { Page } from '@playwright/test';
import { BASE_URL } from './constants';
type GrafanaPage = '/plugins/grafana-oncall-app';
type OnCallPage = 'alert-groups' | 'integrations' | 'escalations' | 'schedules' | 'users';
const _goToPage = (page: Page, url = ''): Promise<Response> => page.goto(`${BASE_URL}${url}`);
const _goToPage = async (page: Page, url = '') => page.goto(`${BASE_URL}${url}`);
export const goToGrafanaPage = (page: Page, url: GrafanaPage): Promise<Response> => _goToPage(page, url);
export const goToGrafanaPage = async (page: Page, url: GrafanaPage) => _goToPage(page, url);
export const goToOnCallPage = (page: Page, onCallPage: OnCallPage): Promise<Response> =>
_goToPage(page, `/a/grafana-oncall-app/${onCallPage}`);
export const goToOnCallPage = async (page: Page, onCallPage: OnCallPage) => {
await _goToPage(page, `/a/grafana-oncall-app/${onCallPage}`);
await page.waitForTimeout(1000);
};

View file

@ -1,4 +1,4 @@
import { PlaywrightTestProject, defineConfig, devices } from '@playwright/test';
import { PlaywrightTestProject, defineConfig, devices, PlaywrightTestConfig } from '@playwright/test';
import path from 'path';
/**
@ -12,7 +12,11 @@ export const EDITOR_USER_STORAGE_STATE = path.join(__dirname, 'e2e-tests/.auth/e
export const ADMIN_USER_STORAGE_STATE = path.join(__dirname, 'e2e-tests/.auth/admin.json');
const IS_CI = !!process.env.CI;
const BROWSERS = process.env.BROWSERS || 'chromium firefox webkit';
const BROWSERS = process.env.BROWSERS || 'chromium';
const REPORTER_WITH_DEFAULT = process.env.REPORTER || 'html';
const REPORTER = (
process.env.REPORTER === 'html' ? [['html', { open: 'never' }]] : REPORTER_WITH_DEFAULT
) as PlaywrightTestConfig['reporter'];
const SETUP_PROJECT_NAME = 'setup';
const getEnabledBrowsers = (browsers: PlaywrightTestProject[]) =>
@ -25,16 +29,18 @@ export default defineConfig({
testDir: './e2e-tests',
/* Maximum time all the tests can run for. */
globalTimeout: 20 * 60 * 1000, // 20 minutes
globalTimeout: 20 * 60 * 1_000, // 20 minutes
reporter: REPORTER,
/* Maximum time one test can run for. */
timeout: 60 * 1000,
timeout: 60_000,
expect: {
/**
* Maximum time expect() should wait for the condition to be met.
* For example in `await expect(locator).toHaveText();`
*/
timeout: 10000,
timeout: 6_000,
},
/* Run tests in files in parallel */
fullyParallel: false,
@ -46,10 +52,10 @@ export default defineConfig({
* NOTE: until we fix this issue (https://github.com/grafana/oncall/issues/1692) which occasionally leads
* to flaky tests.. let's allow 1 retry per test
*/
retries: IS_CI ? 1 : 0,
retries: 1,
workers: 2,
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
reporter: 'html',
// reporter: 'html',
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
use: {
/* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */
@ -59,7 +65,7 @@ export default defineConfig({
trace: 'on',
video: 'on',
headless: IS_CI,
headless: true,
},
/* Configure projects for major browsers. The final list is filtered based on BROWSERS env var */
@ -109,8 +115,9 @@ export default defineConfig({
// },
]),
/* Folder for test artifacts such as screenshots, videos, traces, etc. */
// outputDir: 'test-results/',
/* Folder for test artifacts such as screenshots, videos, traces, etc.
Set outside of grafana-plugin to prevent refreshing Grafana UI during e2e test runs */
outputDir: '../test-results/',
/* Run your local dev server before starting the tests */
// webServer: {

View file

@ -92,11 +92,15 @@ const TimeRange = (props: TimeRangeProps) => {
return (
<div className={cx('root', className)}>
<HorizontalGroup wrap>
{/* @ts-ignore actually TimeOfDayPicker uses Moment objects */}
<TimeOfDayPicker disabled={disabled} value={from} minuteStep={5} onChange={handleChangeFrom} />
<div data-testid="time-range-from">
{/* @ts-ignore actually TimeOfDayPicker uses Moment objects */}
<TimeOfDayPicker disabled={disabled} value={from} minuteStep={5} onChange={handleChangeFrom} />
</div>
to
{/* @ts-ignore actually TimeOfDayPicker uses Moment objects */}
<TimeOfDayPicker disabled={disabled} value={to} minuteStep={5} onChange={handleChangeTo} />
<div data-testid="time-range-to">
{/* @ts-ignore actually TimeOfDayPicker uses Moment objects */}
<TimeOfDayPicker disabled={disabled} value={to} minuteStep={5} onChange={handleChangeTo} />
</div>
{showNextDayTip && 'next day'}
</HorizontalGroup>
</div>

View file

@ -18,6 +18,7 @@ interface TooltipBadgeProps {
customIcon?: React.ReactNode;
addPadding?: boolean;
placement?;
testId?: string;
onHover?: () => void;
}
@ -36,11 +37,9 @@ const TooltipBadge: FC<TooltipBadgeProps> = (props) => {
icon,
customIcon,
className,
...rest
testId,
} = props;
const testId = rest['data-testid'];
return (
<Tooltip
placement={placement || 'bottom-start'}

View file

@ -5,7 +5,7 @@ import { useLocation } from 'react-router-dom';
import { OnCallPluginConfigPageProps } from 'types';
import PluginState, { PluginStatusResponseBase } from 'state/plugin';
import { FALLBACK_LICENSE, GRAFANA_LICENSE_OSS } from 'utils/consts';
import { FALLBACK_LICENSE, getOnCallApiUrl, GRAFANA_LICENSE_OSS, hasPluginBeenConfigured } from 'utils/consts';
import ConfigurationForm from './parts/ConfigurationForm';
import RemoveCurrentConfigurationButton from './parts/RemoveCurrentConfigurationButton';
@ -46,7 +46,8 @@ export const removePluginConfiguredQueryParams = (pluginIsEnabled: boolean): voi
const PluginConfigPage: FC<OnCallPluginConfigPageProps> = ({
plugin: {
meta: { jsonData, enabled: pluginIsEnabled },
meta,
meta: { enabled: pluginIsEnabled },
},
}) => {
const { search } = useLocation();
@ -75,11 +76,8 @@ const PluginConfigPage: FC<OnCallPluginConfigPageProps> = ({
const [resettingPlugin, setResettingPlugin] = useState<boolean>(false);
const [pluginResetError, setPluginResetError] = useState<string>(null);
const pluginMetaOnCallApiUrl = jsonData?.onCallApiUrl;
const processEnvOnCallApiUrl = process.env.ONCALL_API_URL; // don't destructure this, will break how webpack supplies this
const onCallApiUrl = pluginMetaOnCallApiUrl || processEnvOnCallApiUrl;
const licenseType = pluginIsConnected?.license || FALLBACK_LICENSE;
const onCallApiUrl = getOnCallApiUrl(meta);
const resetQueryParams = useCallback(() => removePluginConfiguredQueryParams(pluginIsEnabled), [pluginIsEnabled]);
@ -110,12 +108,12 @@ const PluginConfigPage: FC<OnCallPluginConfigPageProps> = ({
* Supplying the env var basically allows to skip the configuration form
* (check webpack.config.js to see how this is set)
*/
if (!pluginMetaOnCallApiUrl && processEnvOnCallApiUrl) {
if (!hasPluginBeenConfigured(meta) && onCallApiUrl) {
/**
* onCallApiUrl is not yet saved in the grafana plugin settings, but has been supplied as an env var
* lets auto-trigger a self-hosted plugin install w/ the onCallApiUrl passed in as an env var
*/
const errorMsg = await PluginState.selfHostedInstallPlugin(processEnvOnCallApiUrl, true);
const errorMsg = await PluginState.selfHostedInstallPlugin(onCallApiUrl, true);
if (errorMsg) {
setPluginConnectionCheckError(errorMsg);
setCheckingIfPluginIsConnected(false);
@ -146,7 +144,7 @@ const PluginConfigPage: FC<OnCallPluginConfigPageProps> = ({
if (!pluginConfiguredRedirect) {
configurePluginAndUpdatePluginStatus();
}
}, [pluginMetaOnCallApiUrl, processEnvOnCallApiUrl, onCallApiUrl, pluginConfiguredRedirect]);
}, [onCallApiUrl, pluginConfiguredRedirect]);
const resetMessages = useCallback(() => {
setPluginResetError(null);
@ -210,9 +208,7 @@ const PluginConfigPage: FC<OnCallPluginConfigPageProps> = ({
</>
);
} else if (!pluginIsConnected) {
content = (
<ConfigurationForm onSuccessfulSetup={triggerUpdatePluginStatus} defaultOnCallApiUrl={processEnvOnCallApiUrl} />
);
content = <ConfigurationForm onSuccessfulSetup={triggerUpdatePluginStatus} defaultOnCallApiUrl={onCallApiUrl} />;
} else {
// plugin is fully connected and synced
const pluginLink = (

View file

@ -388,6 +388,7 @@ const RotationForm = observer((props: RotationFormProps) => {
setShowActiveOnSelectedDays(Boolean(shift.by_day?.length));
const activeOnSelectedPartOfDay =
shift.frequency !== RepeatEveryPeriod.MONTHS &&
repeatEveryInSeconds(shift.frequency, shift.interval) !== shiftEnd.diff(shiftStart, 'seconds');
setShowActiveOnSelectedPartOfDay(activeOnSelectedPartOfDay);

View file

@ -420,7 +420,7 @@ class Integration extends React.Component<IntegrationProps, IntegrationState> {
Autoresolve:
</Text>
<Text type="primary">
{IntegrationHelper.truncateLine(templates['resolve_condition_template'] || 'disabled')}
{IntegrationHelper.truncateLine(templates?.['resolve_condition_template'] || 'disabled')}
</Text>
</div>
@ -1131,7 +1131,7 @@ const IntegrationHeader: React.FC<IntegrationHeaderProps> = ({
{alertReceiveChannel.maintenance_till && (
<TooltipBadge
data-testid="maintenance-mode-remaining-time-tooltip"
testId="maintenance-mode-remaining-time-tooltip"
borderType="primary"
icon="pause"
text={IntegrationHelper.getMaintenanceText(alertReceiveChannel.maintenance_till)}
@ -1193,7 +1193,6 @@ const IntegrationHeader: React.FC<IntegrationHeaderProps> = ({
return (
<TooltipBadge
data-testid="heartbeat-badge"
text={undefined}
className={cx('heartbeat-badge')}
borderType={heartbeatStatus ? 'success' : 'danger'}

View file

@ -440,6 +440,7 @@ class Integrations extends React.Component<IntegrationsProps, IntegrationsState>
<div>
{alertReceiveChannel.is_available_for_integration_heartbeat && heartbeat?.last_heartbeat_time_verbal && (
<TooltipBadge
testId="heartbeat-badge"
text={undefined}
className={cx('heartbeat-badge')}
placement="top"

View file

@ -37,6 +37,7 @@ import { retryFailingPromises } from 'utils/async';
import {
APP_VERSION,
CLOUD_VERSION_REGEX,
getOnCallApiUrl,
GRAFANA_LICENSE_CLOUD,
GRAFANA_LICENSE_OSS,
PLUGIN_ROOT,
@ -167,7 +168,7 @@ export class RootBaseStore {
*/
async setupPlugin(meta: OnCallAppPluginMeta) {
this.initializationError = null;
this.onCallApiUrl = meta.jsonData?.onCallApiUrl;
this.onCallApiUrl = getOnCallApiUrl(meta);
if (!FaroHelper.faro) {
FaroHelper.initializeFaro(this.onCallApiUrl);
@ -180,7 +181,7 @@ export class RootBaseStore {
if (this.isOpenSource() && !meta.secureJsonFields?.onCallApiToken) {
// Reinstall plugin if onCallApiToken is missing
const errorMsg = await PluginState.selfHostedInstallPlugin(process.env.ONCALL_API_URL, true);
const errorMsg = await PluginState.selfHostedInstallPlugin(this.onCallApiUrl, true);
if (errorMsg) {
return this.setupPluginError(errorMsg);
}
@ -310,8 +311,8 @@ export class RootBaseStore {
await this.slackStore.installSlackIntegration();
}
@action.bound
async getApiUrlForSettings() {
const settings = await PluginState.getGrafanaPluginSettings();
return settings.jsonData?.onCallApiUrl;
return this.onCallApiUrl;
}
}

View file

@ -17,6 +17,8 @@ jest.mock('grafana/app/core/core', () => ({
},
}));
const onCallApiUrl = 'http://oncall-dev-engine:8080';
const isUserActionAllowed = isUserActionAllowedOriginal as jest.Mock<ReturnType<typeof isUserActionAllowedOriginal>>;
const generatePluginData = (
@ -32,7 +34,6 @@ describe('rootBaseStore', () => {
});
test("onCallApiUrl is not set in the plugin's meta jsonData", async () => {
// mocks/setup
const rootBaseStore = new RootBaseStore();
// test
@ -43,9 +44,7 @@ describe('rootBaseStore', () => {
});
test('when there is an issue checking the plugin connection, the error is properly handled', async () => {
// mocks/setup
const errorMsg = 'ohhh noooo error';
const onCallApiUrl = 'http://asdfasdf.com';
const rootBaseStore = new RootBaseStore();
PluginState.updatePluginStatus = jest.fn().mockResolvedValueOnce(errorMsg);
@ -61,8 +60,6 @@ describe('rootBaseStore', () => {
});
test('currently undergoing maintenance', async () => {
// mocks/setup
const onCallApiUrl = 'http://asdfasdf.com';
const rootBaseStore = new RootBaseStore();
const maintenanceMessage = 'mncvnmvcmnvkjdjkd';
@ -82,8 +79,6 @@ describe('rootBaseStore', () => {
});
test('anonymous user', async () => {
// mocks/setup
const onCallApiUrl = 'http://asdfasdf.com';
const rootBaseStore = new RootBaseStore();
PluginState.updatePluginStatus = jest.fn().mockResolvedValueOnce({
@ -108,8 +103,6 @@ describe('rootBaseStore', () => {
});
test('the plugin is not installed, and allow_signup is false', async () => {
// mocks/setup
const onCallApiUrl = 'http://asdfasdf.com';
const rootBaseStore = new RootBaseStore();
PluginState.updatePluginStatus = jest.fn().mockResolvedValueOnce({
@ -137,8 +130,6 @@ describe('rootBaseStore', () => {
});
test('plugin is not installed, user is not an Admin', async () => {
// mocks/setup
const onCallApiUrl = 'http://asdfasdf.com';
const rootBaseStore = new RootBaseStore();
contextSrv.user.orgRole = OrgRole.Viewer;
@ -174,8 +165,6 @@ describe('rootBaseStore', () => {
{ is_installed: false, token_ok: true },
{ is_installed: true, token_ok: false },
])('signup is allowed, user is an admin, plugin installation is triggered', async (scenario) => {
// mocks/setup
const onCallApiUrl = 'http://asdfasdf.com';
const rootBaseStore = new RootBaseStore();
const mockedLoadCurrentUser = jest.fn();
@ -219,8 +208,6 @@ describe('rootBaseStore', () => {
expected_result: false,
},
])('signup is allowed, licensedAccessControlEnabled, various roles and permissions', async (scenario) => {
// mocks/setup
const onCallApiUrl = 'http://asdfasdf.com';
const rootBaseStore = new RootBaseStore();
const mockedLoadCurrentUser = jest.fn();
@ -261,8 +248,6 @@ describe('rootBaseStore', () => {
});
test('plugin is not installed, signup is allowed, the user is an admin, and plugin installation throws an error', async () => {
// mocks/setup
const onCallApiUrl = 'http://asdfasdf.com';
const rootBaseStore = new RootBaseStore();
const installPluginError = new Error('asdasdfasdfasf');
const humanReadableErrorMsg = 'asdfasldkfjaksdjflk';
@ -304,8 +289,6 @@ describe('rootBaseStore', () => {
});
test('when the plugin is installed, a data sync is triggered', async () => {
// mocks/setup
const onCallApiUrl = 'http://asdfasdf.com';
const rootBaseStore = new RootBaseStore();
const mockedLoadCurrentUser = jest.fn();
@ -333,8 +316,6 @@ describe('rootBaseStore', () => {
});
test('when the plugin is installed, and the data sync returns an error, it is properly handled', async () => {
// mocks/setup
const onCallApiUrl = 'http://asdfasdf.com';
const rootBaseStore = new RootBaseStore();
const mockedLoadCurrentUser = jest.fn();
const updatePluginStatusError = 'asdasdfasdfasf';

View file

@ -1,3 +1,5 @@
import { OnCallAppPluginMeta } from 'types';
import plugin from '../../package.json'; // eslint-disable-line
// Navbar
@ -30,6 +32,13 @@ export const ONCALL_PROD = 'https://oncall-prod-us-central-0.grafana.net/oncall'
export const ONCALL_OPS = 'https://oncall-ops-us-east-0.grafana.net/oncall';
export const ONCALL_DEV = 'https://oncall-dev-us-central-0.grafana.net/oncall';
// Single source of truth on the frontend for OnCall API URL
export const getOnCallApiUrl = (meta?: OnCallAppPluginMeta) =>
meta?.jsonData?.onCallApiUrl || process.env.ONCALL_API_URL;
// If the plugin has never been configured, onCallApiUrl will be undefined in the plugin's jsonData
export const hasPluginBeenConfigured = (meta?: OnCallAppPluginMeta) => Boolean(meta?.jsonData?.onCallApiUrl);
// Faro
export const FARO_ENDPOINT_DEV =
'https://faro-collector-prod-us-central-0.grafana.net/collect/fb03e474a96cf867f4a34590c002984c';

View file

@ -82,6 +82,13 @@ engine:
# Extra volume mounts for the main app container
extraVolumeMounts: []
# - mountPath: /mnt/postgres-tls
# name: postgres-tls
# - mountPath: /mnt/redis-tls
# name: redis-tls
# Extra volumes for the pod
extraVolumes: []
# - name: postgres-tls
# configMap:
# name: my-postgres-tls
@ -91,13 +98,6 @@ engine:
# name: my-redis-tls
# defaultMode: 0640
# Extra volumes for the pod
extraVolumes: []
# - mountPath: /mnt/postgres-tls
# name: postgres-tls
# - mountPath: /mnt/redis-tls
# name: redis-tls
detached_integrations_service:
enabled: false
type: LoadBalancer
@ -157,6 +157,13 @@ detached_integrations:
# Extra volume mounts for the container
extraVolumeMounts: []
# - mountPath: /mnt/postgres-tls
# name: postgres-tls
# - mountPath: /mnt/redis-tls
# name: redis-tls
# Extra volumes for the pod
extraVolumes: []
# - name: postgres-tls
# configMap:
# name: my-postgres-tls
@ -166,13 +173,6 @@ detached_integrations:
# name: my-redis-tls
# defaultMode: 0640
# Extra volumes for the pod
extraVolumes: []
# - mountPath: /mnt/postgres-tls
# name: postgres-tls
# - mountPath: /mnt/redis-tls
# name: redis-tls
# Celery workers pods configuration
celery:
replicaCount: 1
@ -235,6 +235,13 @@ celery:
# Extra volume mounts for the main container
extraVolumeMounts: []
# - mountPath: /mnt/postgres-tls
# name: postgres-tls
# - mountPath: /mnt/redis-tls
# name: redis-tls
# Extra volumes for the pod
extraVolumes: []
# - name: postgres-tls
# configMap:
# name: my-postgres-tls
@ -244,13 +251,6 @@ celery:
# name: my-redis-tls
# defaultMode: 0640
# Extra volumes for the pod
extraVolumes: []
# - mountPath: /mnt/postgres-tls
# name: postgres-tls
# - mountPath: /mnt/redis-tls
# name: redis-tls
# Telegram polling pod configuration
telegramPolling:
enabled: false
@ -268,6 +268,13 @@ telegramPolling:
# Extra volume mounts for the main container
extraVolumeMounts: []
# - mountPath: /mnt/postgres-tls
# name: postgres-tls
# - mountPath: /mnt/redis-tls
# name: redis-tls
# Extra volumes for the pod
extraVolumes: []
# - name: postgres-tls
# configMap:
# name: my-postgres-tls
@ -277,13 +284,6 @@ telegramPolling:
# name: my-redis-tls
# defaultMode: 0640
# Extra volumes for the pod
extraVolumes: []
# - mountPath: /mnt/postgres-tls
# name: postgres-tls
# - mountPath: /mnt/redis-tls
# name: redis-tls
oncall:
# this is intended to be used for local development. In short, it will mount the ./engine dir into
# any backend related containers, to allow hot-reloading + also run the containers with slightly modified
@ -420,6 +420,13 @@ migrate:
# Extra volume mounts for the main container
extraVolumeMounts: []
# - mountPath: /mnt/postgres-tls
# name: postgres-tls
# - mountPath: /mnt/redis-tls
# name: redis-tls
# Extra volumes for the pod
extraVolumes: []
# - name: postgres-tls
# configMap:
# name: my-postgres-tls
@ -429,13 +436,6 @@ migrate:
# name: my-redis-tls
# defaultMode: 0640
# Extra volumes for the pod
extraVolumes: []
# - mountPath: /mnt/postgres-tls
# name: postgres-tls
# - mountPath: /mnt/redis-tls
# name: redis-tls
# Sets environment variables with name capitalized and prefixed with UWSGI_,
# and dashes are substituted with underscores.
# see more: https://uwsgi-docs.readthedocs.io/en/latest/Configuration.html#environment-variables