# oncall-engine/engine/apps/alerts/tests/test_alert_group.py
import hashlib
from unittest.mock import call, patch
import pytest
from apps.alerts.constants import ActionSource, AlertGroupState
from apps.alerts.incident_appearance.renderers.phone_call_renderer import AlertGroupPhoneCallRenderer
from apps.alerts.models import Alert, AlertGroup, AlertGroupLogRecord
from apps.alerts.tasks import wipe
from apps.alerts.tasks.delete_alert_group import (
delete_alert_group,
finish_delete_alert_group,
send_alert_group_signal_for_delete,
)
from apps.slack.client import SlackClient
from apps.slack.errors import SlackAPIMessageNotFoundError, SlackAPIRatelimitError
from apps.slack.tests.conftest import build_slack_response
@pytest.mark.django_db
def test_render_for_phone_call(
    make_organization_with_slack_team_identity,
    make_alert_receive_channel,
    make_alert_group,
    make_alert,
    make_slack_channel,
    make_slack_message,
):
    """The phone-call renderer mentions the integration's verbal name and the alert title."""
    organization, slack_team_identity = make_organization_with_slack_team_identity()
    alert_receive_channel = make_alert_receive_channel(organization)
    alert_group = make_alert_group(alert_receive_channel)

    slack_channel = make_slack_channel(slack_team_identity)
    make_slack_message(slack_channel, alert_group=alert_group)

    # Minimal Alertmanager-style payload; only "labels.alertname" feeds the rendered title.
    make_alert(
        alert_group,
        raw_request_data={
            "status": "firing",
            "labels": {
                "alertname": "TestAlert",
                "region": "eu-1",
            },
            "annotations": {},
            "startsAt": "2018-12-25T15:47:47.377363608Z",
            "endsAt": "0001-01-01T00:00:00Z",
            "generatorURL": "",
        },
    )

    expected_verbose_name = (
        f"to check an Alert Group from Grafana OnCall. "
        f"Alert via {alert_receive_channel.verbal_name} - Grafana Legacy Alerting with title TestAlert triggered 1 times"
    )
    rendered_text = AlertGroupPhoneCallRenderer(alert_group).render()
    assert expected_verbose_name in rendered_text
@pytest.mark.django_db
def test_wipe(
    make_organization_and_user,
    make_alert_receive_channel,
    make_alert_group,
    make_alert,
):
    """Wiping an alert group records who/when wiped it and empties its alerts' payloads."""
    organization, user = make_organization_and_user()
    integration = make_alert_receive_channel(organization)
    group = make_alert_group(integration)
    alert = make_alert(group, raw_request_data={"test": 42})

    wipe(group.pk, user.pk)

    group.refresh_from_db()
    alert.refresh_from_db()

    assert group.wiped_at is not None
    assert group.wiped_by == user
    assert alert.raw_request_data == {}
@patch.object(SlackClient, "reactions_remove")
@patch.object(SlackClient, "chat_delete")
@pytest.mark.django_db
def test_delete(
    mock_chat_delete,
    mock_reactions_remove,
    make_organization_with_slack_team_identity,
    make_user,
    make_alert_receive_channel,
    make_alert_group,
    make_alert,
    make_slack_channel,
    make_slack_message,
    make_resolution_note_slack_message,
    django_capture_on_commit_callbacks,
):
    """Deleting an alert group removes its DB records and cleans up its Slack messages."""
    organization, slack_team_identity = make_organization_with_slack_team_identity()
    slack_channel1 = make_slack_channel(slack_team_identity)
    slack_channel2 = make_slack_channel(slack_team_identity)
    user = make_user(organization=organization)
    alert_receive_channel = make_alert_receive_channel(organization)
    alert_group = make_alert_group(alert_receive_channel)
    make_alert(alert_group, raw_request_data={})

    # Create Slack messages
    slack_message = make_slack_message(slack_channel1, alert_group=alert_group)
    resolution_note_1 = make_resolution_note_slack_message(
        alert_group=alert_group,
        user=user,
        added_by_user=user,
        posted_by_bot=True,
        slack_channel=slack_channel1,
        ts="test1_ts",
    )
    resolution_note_2 = make_resolution_note_slack_message(
        alert_group=alert_group,
        user=user,
        added_by_user=user,
        added_to_resolution_note=True,
        slack_channel=slack_channel2,
        ts="test2_ts",
    )

    assert alert_group.alerts.count() == 1
    assert alert_group.slack_messages.count() == 1
    assert alert_group.resolution_note_slack_messages.count() == 2

    # The delete pipeline is a chain of three tasks; drive them one step at a time,
    # mocking the hand-off to the next task so each step can be asserted in isolation.
    with patch(
        "apps.alerts.tasks.delete_alert_group.send_alert_group_signal_for_delete.delay", return_value=None
    ) as mock_send_alert_group_signal:
        with django_capture_on_commit_callbacks(execute=True):
            delete_alert_group(alert_group.pk, user.pk)
    assert mock_send_alert_group_signal.call_count == 1

    with patch(
        "apps.alerts.tasks.delete_alert_group.finish_delete_alert_group.apply_async", return_value=None
    ) as mock_finish_delete_alert_group:
        send_alert_group_signal_for_delete(*mock_send_alert_group_signal.call_args.args)
    assert mock_finish_delete_alert_group.call_count == 1

    finish_delete_alert_group(alert_group.pk)

    # All related records, and the alert group itself, are gone.
    assert not alert_group.alerts.exists()
    assert not alert_group.slack_messages.exists()
    assert not alert_group.resolution_note_slack_messages.exists()
    with pytest.raises(AlertGroup.DoesNotExist):
        alert_group.refresh_from_db()

    # Check that appropriate Slack API calls are made: bot-posted messages are deleted,
    # user-authored resolution notes only get the "memo" reaction removed.
    assert mock_chat_delete.call_count == 2
    assert mock_chat_delete.call_args_list[0] == call(
        channel=resolution_note_1.slack_channel.slack_id, ts=resolution_note_1.ts
    )
    assert mock_chat_delete.call_args_list[1] == call(channel=slack_message.channel.slack_id, ts=slack_message.slack_id)
    mock_reactions_remove.assert_called_once_with(
        channel=resolution_note_2.slack_channel.slack_id, name="memo", timestamp=resolution_note_2.ts
    )
@pytest.mark.parametrize("api_method", ["reactions_remove", "chat_delete"])
@patch.object(send_alert_group_signal_for_delete, "apply_async")
@pytest.mark.django_db
def test_delete_slack_ratelimit(
    mock_send_alert_group_signal_for_delete,
    api_method,
    make_organization_with_slack_team_identity,
    make_user,
    make_alert_receive_channel,
    make_alert_group,
    make_alert,
    make_slack_channel,
    make_slack_message,
    make_resolution_note_slack_message,
    django_capture_on_commit_callbacks,
):
    """A Slack ratelimit during deletion defers completion and retries with Retry-After as countdown."""
    organization, slack_team_identity = make_organization_with_slack_team_identity()
    slack_channel1 = make_slack_channel(slack_team_identity)
    slack_channel2 = make_slack_channel(slack_team_identity)
    user = make_user(organization=organization)
    alert_receive_channel = make_alert_receive_channel(organization)
    alert_group = make_alert_group(alert_receive_channel)
    make_alert(alert_group, raw_request_data={})

    # Create Slack messages
    make_slack_message(slack_channel1, alert_group=alert_group)
    make_resolution_note_slack_message(
        alert_group=alert_group,
        user=user,
        added_by_user=user,
        posted_by_bot=True,
        slack_channel=slack_channel1,
        ts="test1_ts",
    )
    make_resolution_note_slack_message(
        alert_group=alert_group,
        user=user,
        added_by_user=user,
        added_to_resolution_note=True,
        slack_channel=slack_channel2,
        ts="test2_ts",
    )

    with patch(
        "apps.alerts.tasks.delete_alert_group.send_alert_group_signal_for_delete.delay", return_value=None
    ) as mock_send_alert_group_signal:
        with django_capture_on_commit_callbacks(execute=True):
            delete_alert_group(alert_group.pk, user.pk)
    assert mock_send_alert_group_signal.call_count == 1

    with patch(
        "apps.alerts.tasks.delete_alert_group.finish_delete_alert_group.apply_async", return_value=None
    ) as mock_finish_delete_alert_group:
        with patch.object(
            SlackClient,
            api_method,
            side_effect=SlackAPIRatelimitError(
                response=build_slack_response({"ok": False, "error": "ratelimited"}, headers={"Retry-After": 42})
            ),
        ):
            send_alert_group_signal_for_delete(*mock_send_alert_group_signal.call_args.args)

    # Deletion must not be finished while Slack is ratelimiting us...
    assert mock_finish_delete_alert_group.call_count == 0
    # ...instead the task is retried gracefully, honouring the Retry-After header.
    mock_send_alert_group_signal_for_delete.assert_called_once_with(
        mock_send_alert_group_signal.call_args.args, countdown=42
    )
@pytest.mark.parametrize("api_method", ["reactions_remove", "chat_delete"])
@patch.object(delete_alert_group, "apply_async")
@pytest.mark.django_db
def test_delete_slack_api_error_other_than_ratelimit(
    mock_delete_alert_group,
    api_method,
    make_organization_with_slack_team_identity,
    make_user,
    make_alert_receive_channel,
    make_alert_group,
    make_alert,
    make_slack_channel,
    make_slack_message,
    make_resolution_note_slack_message,
):
    """Non-ratelimit Slack errors (e.g. message_not_found) neither raise nor trigger a retry."""
    organization, slack_team_identity = make_organization_with_slack_team_identity()
    slack_channel1 = make_slack_channel(slack_team_identity)
    slack_channel2 = make_slack_channel(slack_team_identity)
    user = make_user(organization=organization)
    alert_receive_channel = make_alert_receive_channel(organization)
    alert_group = make_alert_group(alert_receive_channel)
    make_alert(alert_group, raw_request_data={})

    # Create Slack messages
    make_slack_message(slack_channel1, alert_group=alert_group)
    make_resolution_note_slack_message(
        alert_group=alert_group,
        user=user,
        added_by_user=user,
        posted_by_bot=True,
        slack_channel=slack_channel1,
        ts="test1_ts",
    )
    make_resolution_note_slack_message(
        alert_group=alert_group,
        user=user,
        added_by_user=user,
        added_to_resolution_note=True,
        slack_channel=slack_channel2,
        ts="test2_ts",
    )

    with patch.object(
        SlackClient,
        api_method,
        side_effect=SlackAPIMessageNotFoundError(
            response=build_slack_response({"ok": False, "error": "message_not_found"})
        ),
    ):
        delete_alert_group(alert_group.pk, user.pk)  # check no exception is raised

    # Check task is not retried
    mock_delete_alert_group.assert_not_called()
@pytest.mark.django_db
def test_alerts_count_gt(
    make_organization,
    make_alert_receive_channel,
    make_alert_group,
    make_alert,
):
    """`alerts_count_gt(n)` is True iff the group holds strictly more than n alerts."""
    org = make_organization()
    integration = make_alert_receive_channel(org)
    group = make_alert_group(integration)

    # Check case when there is no alerts
    assert group.alerts_count_gt(1) is False

    for _ in range(2):
        make_alert(group, raw_request_data={})

    assert group.alerts_count_gt(1) is True
    assert group.alerts_count_gt(2) is False
    assert group.alerts_count_gt(3) is False
@patch("apps.alerts.models.AlertGroup.start_unsilence_task", return_value=None)
@pytest.mark.django_db
def test_silence_by_user_for_period(
    mocked_start_unsilence_task,
    make_organization_and_user,
    make_alert_receive_channel,
    make_alert_group,
):
    """Silencing for a finite period shifts next_step_eta and schedules the unsilence task."""
    organization, user = make_organization_and_user()
    integration = make_alert_receive_channel(organization)
    group = make_alert_group(integration)

    original_eta = "2023-08-28T09:27:26.627047Z"
    delay_seconds = 120 * 60
    shifted_eta = "2023-08-28T11:27:36.627047Z"  # silence_delay + START_ESCALATION_DELAY

    group.raw_escalation_snapshot = group.build_raw_escalation_snapshot()
    group.raw_escalation_snapshot["next_step_eta"] = original_eta

    silence_logs = group.log_records.filter(
        type=AlertGroupLogRecord.TYPE_SILENCE,
        author=user,
    )
    assert not silence_logs.exists()

    group.silence_by_user_or_backsync(user, silence_delay=delay_seconds)

    assert silence_logs.exists()
    group.refresh_from_db()
    assert group.silenced
    assert group.raw_escalation_snapshot["next_step_eta"] == shifted_eta
    assert mocked_start_unsilence_task.called
@patch("apps.alerts.models.AlertGroup.start_unsilence_task", return_value=None)
@pytest.mark.django_db
def test_silence_by_user_forever(
    mocked_start_unsilence_task,
    make_organization_and_user,
    make_alert_receive_channel,
    make_alert_group,
):
    """Silencing with no delay leaves next_step_eta untouched and schedules no unsilence."""
    organization, user = make_organization_and_user()
    integration = make_alert_receive_channel(organization)
    group = make_alert_group(integration)

    original_eta = "2023-08-28T09:27:26.627047Z"
    group.raw_escalation_snapshot = group.build_raw_escalation_snapshot()
    group.raw_escalation_snapshot["next_step_eta"] = original_eta

    silence_logs = group.log_records.filter(
        type=AlertGroupLogRecord.TYPE_SILENCE,
        author=user,
    )
    assert not silence_logs.exists()

    group.silence_by_user_or_backsync(user, silence_delay=None)

    assert silence_logs.exists()
    group.refresh_from_db()
    assert group.silenced
    assert group.raw_escalation_snapshot["next_step_eta"] == original_eta
    assert not mocked_start_unsilence_task.called
@patch("apps.alerts.models.AlertGroup.start_unsilence_task", return_value=None)
@pytest.mark.django_db
def test_bulk_silence_for_period(
    mocked_start_unsilence_task,
    make_organization_and_user,
    make_alert_receive_channel,
    make_alert_group,
):
    """Bulk-silencing for a period shifts next_step_eta and schedules the unsilence task."""
    organization, user = make_organization_and_user()
    integration = make_alert_receive_channel(organization)
    group = make_alert_group(integration)

    original_eta = "2023-08-28T09:27:26.627047Z"
    delay_seconds = 120 * 60
    shifted_eta = "2023-08-28T11:27:36.627047Z"  # silence_delay + START_ESCALATION_DELAY

    group.raw_escalation_snapshot = group.build_raw_escalation_snapshot()
    group.raw_escalation_snapshot["next_step_eta"] = original_eta
    group.save()

    target_groups = AlertGroup.objects.filter(pk__in=[group.id])
    silence_logs = group.log_records.filter(
        type=AlertGroupLogRecord.TYPE_SILENCE,
        author=user,
    )
    assert not silence_logs.exists()

    AlertGroup.bulk_silence(user, target_groups, silence_delay=delay_seconds)

    assert silence_logs.exists()
    group.refresh_from_db()
    assert group.silenced
    assert group.raw_escalation_snapshot["next_step_eta"] == shifted_eta
    assert mocked_start_unsilence_task.called
@patch("apps.alerts.models.AlertGroup.start_unsilence_task", return_value=None)
@pytest.mark.django_db
def test_bulk_silence_forever(
    mocked_start_unsilence_task,
    make_organization_and_user,
    make_alert_receive_channel,
    make_alert_group,
):
    """Bulk-silencing with delay 0 (forever) leaves next_step_eta alone and schedules no unsilence."""
    organization, user = make_organization_and_user()
    integration = make_alert_receive_channel(organization)
    group = make_alert_group(integration)

    original_eta = "2023-08-28T09:27:26.627047Z"
    group.raw_escalation_snapshot = group.build_raw_escalation_snapshot()
    group.raw_escalation_snapshot["next_step_eta"] = original_eta
    group.save()

    target_groups = AlertGroup.objects.filter(pk__in=[group.id])
    silence_logs = group.log_records.filter(
        type=AlertGroupLogRecord.TYPE_SILENCE,
        author=user,
    )
    assert not silence_logs.exists()

    AlertGroup.bulk_silence(user, target_groups, silence_delay=0)

    assert silence_logs.exists()
    group.refresh_from_db()
    assert group.silenced
    assert group.raw_escalation_snapshot["next_step_eta"] == original_eta
    assert not mocked_start_unsilence_task.called
@pytest.mark.parametrize("action_source", ActionSource)
@pytest.mark.django_db
def test_alert_group_log_record_action_source(
    make_organization_and_user,
    make_alert_receive_channel,
    make_alert_group,
    action_source,
):
    """Every state-changing action must persist its action source on the resulting log record."""
    organization, user = make_organization_and_user()
    alert_receive_channel = make_alert_receive_channel(organization)
    alert_group = make_alert_group(alert_receive_channel)
    root_alert_group = make_alert_group(alert_receive_channel)

    # Backsync actions are attributed to a source integration instead of a user.
    if action_source == ActionSource.BACKSYNC:
        base_kwargs = {"source_channel": alert_receive_channel}
    else:
        base_kwargs = {"user": user}

    def _assert_last_log(expected_type):
        # the most recent log record must carry both the expected type and the action source
        record = alert_group.log_records.last()
        assert record.type == expected_type
        assert record.action_source == action_source

    # Silence / unsilence
    alert_group.silence_by_user_or_backsync(**base_kwargs, silence_delay=42, action_source=action_source)
    _assert_last_log(AlertGroupLogRecord.TYPE_SILENCE)
    alert_group.un_silence_by_user_or_backsync(**base_kwargs, action_source=action_source)
    _assert_last_log(AlertGroupLogRecord.TYPE_UN_SILENCE)

    # Acknowledge / unacknowledge
    alert_group.acknowledge_by_user_or_backsync(**base_kwargs, action_source=action_source)
    _assert_last_log(AlertGroupLogRecord.TYPE_ACK)
    alert_group.un_acknowledge_by_user_or_backsync(**base_kwargs, action_source=action_source)
    _assert_last_log(AlertGroupLogRecord.TYPE_UN_ACK)

    # Resolve / unresolve
    alert_group.resolve_by_user_or_backsync(**base_kwargs, action_source=action_source)
    _assert_last_log(AlertGroupLogRecord.TYPE_RESOLVED)
    alert_group.un_resolve_by_user_or_backsync(**base_kwargs, action_source=action_source)
    _assert_last_log(AlertGroupLogRecord.TYPE_UN_RESOLVED)

    # Attach / unattach are user-only actions; backsync cannot perform them.
    if action_source != ActionSource.BACKSYNC:
        alert_group.attach_by_user(user, root_alert_group, action_source=action_source)
        _assert_last_log(AlertGroupLogRecord.TYPE_ATTACHED)
        alert_group.un_attach_by_user(user, action_source=action_source)
        _assert_last_log(AlertGroupLogRecord.TYPE_UNATTACHED)
Add responders improvements (#3128) # What this PR does https://www.loom.com/share/c5e10b5ec51343d0954c6f41cfd6a5fb ## Summary of backend changes - Add `AlertReceiveChannel.get_orgs_direct_paging_integrations` method and `AlertReceiveChannel.is_contactable` property. These are needed to be able to (optionally) filter down teams, in the `GET /teams` internal API endpoint ([here](https://github.com/grafana/oncall/pull/3128/files#diff-a4bd76e557f7e11dafb28a52c1034c075028c693b3c12d702d53c07fc6f24c05R55-R63)), to just teams that have a "contactable" Direct Paging integration - `engine/apps/alerts/paging.py` - update these functions to support new UX. In short `direct_paging` no longer takes a list of `ScheduleNotifications` or an `EscalationChain` object - add `user_is_oncall` helper function - add `_construct_title` helper function. In short if no `title` is provided, which is the case for Direct Pages originating from OnCall (either UI or Slack), then the format is `f"{from_user.username} is paging <team.name (if team is specified> <comma separated list of user.usernames> to join escalation"` - `engine/apps/api/serializers/team.py` - add `number_of_users_currently_oncall` attribute to response schema ([code](https://github.com/grafana/oncall/pull/3128/files#diff-26af48f796c9e987a76447586dd0f92349783d6ea6a0b6039a2f0f28bd58c2ebR45-R52)) - `engine/apps/api/serializers/user.py` - add `is_currently_oncall` attribute to response schema ([code](https://github.com/grafana/oncall/pull/3128/files#diff-6744b5544ebb120437af98a996da5ad7d48ee1139a6112c7e3904010ab98f232R157-R162)) - `engine/apps/api/views/team.py` - add support for two new optional query params `only_include_notifiable_teams` and `include_no_team` ([code](https://github.com/grafana/oncall/pull/3128/files#diff-a4bd76e557f7e11dafb28a52c1034c075028c693b3c12d702d53c07fc6f24c05R55-R70)) - `engine/apps/api/views/user.py` - in the `GET /users` internal API endpoint, when specifying the `search` query param now also search 
on `teams__name` ([code](https://github.com/grafana/oncall/pull/3128/files#diff-30309629484ad28e6fe09816e1bd226226d652ea977b6f3b6775976c729bf4b5R223); this is a new UX requirement) - add support for a new optional query param, `is_currently_oncall`, to allow filtering users based on.. whether they are currently on call or not ([code](https://github.com/grafana/oncall/pull/3128/files#diff-30309629484ad28e6fe09816e1bd226226d652ea977b6f3b6775976c729bf4b5R272-R282)) - remove `check_availability` endpoint (no longer used with new UX; also removed references in frontend code) - `engine/apps/slack/scenarios/paging.py` and `engine/apps/slack/scenarios/manage_responders.py` - update Slack workflows to support new UX. Schedules are no longer a concept here. When creating a new alert group via `/escalate` the user either specifies a team and/or user(s) (they must specify at least one of the two and validation is done here to check this). When adding responders to an existing alert group it's simply a list of users that they can add, no more schedules. - add `Organization.slack_is_configured` and `Organization.telegram_is_configured` properties. These are needed to support [this new functionality ](https://github.com/grafana/oncall/pull/3128/files#diff-9d96504027309f2bd1e95352bac1433b09b60eb4fafb611b52a6c15ed16cbc48R271-R272) in the `AlertReceiveChannel` model. 
## Summary of frontend changes - Refactor/rename `EscalationVariants` component to `AddResponders` + remove `grafana-plugin/src/containers/UserWarningModal` (no longer needed with new UX) - Remove `grafana-plugin/src/models/user.ts` as it seemed to be a duplicate of `grafana-plugin/src/models/user/user.types.ts` Related to https://github.com/grafana/incident/issues/4278 - Closes #3115 - Closes #3116 - Closes #3117 - Closes #3118 - Closes #3177 ## TODO - [x] make frontend changes - [x] update Slack backend functionality - [x] update public documentation - [x] add/update e2e tests ## Post-deploy To-dos - [ ] update dev/ops/production Slack bots to update `/escalate` command description (should now say "Direct page a team or user(s)") ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required)
2023-10-27 12:12:07 -04:00
@pytest.mark.django_db
def test_alert_group_get_paged_users(
    make_organization_and_user,
    make_user_for_organization,
    make_alert_receive_channel,
    make_alert_group,
):
    """get_paged_users must reflect the net effect of page/unpage log records per user.

    Fix over the previous version: reuse the stored ``get_paged_users()`` result
    instead of calling it a second time for the pk assertion — each call issues a
    fresh log-record query.
    """
    organization, user = make_organization_and_user()
    other_user = make_user_for_organization(organization)
    alert_receive_channel = make_alert_receive_channel(organization)

    def _make_log_record(alert_group, user, log_type, important=False):
        # step_specific_info mirrors what direct paging writes for a page/unpage event
        alert_group.log_records.create(
            type=log_type,
            author=user,
            reason="paged user",
            step_specific_info={
                "user": user.public_primary_key,
                "important": important,
            },
        )

    # user was paged - also check that important is persisted/available
    alert_group = make_alert_group(alert_receive_channel)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING)
    _make_log_record(alert_group, other_user, AlertGroupLogRecord.TYPE_DIRECT_PAGING, True)
    paged_users = {u["pk"]: u["important"] for u in alert_group.get_paged_users()}
    assert user.public_primary_key in paged_users
    assert paged_users[user.public_primary_key] is False
    assert other_user.public_primary_key in paged_users
    assert paged_users[other_user.public_primary_key] is True

    # user was paged and then unpaged
    alert_group = make_alert_group(alert_receive_channel)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER)
    _make_log_record(alert_group, other_user, AlertGroupLogRecord.TYPE_DIRECT_PAGING)
    paged_users = alert_group.get_paged_users()
    assert paged_users[0]["pk"] == other_user.public_primary_key

    # user was paged, unpaged, and then paged again - they should only show up once
    alert_group = make_alert_group(alert_receive_channel)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING)
    paged_users = alert_group.get_paged_users()
    assert len(paged_users) == 1
    assert paged_users[0]["pk"] == user.public_primary_key

    # user was paged and then paged again - they should only show up once
    alert_group = make_alert_group(alert_receive_channel)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING)
    paged_users = alert_group.get_paged_users()
    assert len(paged_users) == 1
    assert paged_users[0]["pk"] == user.public_primary_key

    # user was paged and then paged again, then unpaged - they should not show up
    alert_group = make_alert_group(alert_receive_channel)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER)
    paged_users = alert_group.get_paged_users()
    assert len(paged_users) == 0

    # adding extra unpage events should not break things
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER)
    _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING)
    paged_users = alert_group.get_paged_users()
    assert len(paged_users) == 1
    assert paged_users[0]["pk"] == user.public_primary_key
@patch("apps.alerts.models.AlertGroup.start_unsilence_task", return_value=None)
@pytest.mark.django_db
def test_filter_active_alert_groups(
    mocked_start_unsilence_task,
    make_organization_and_user,
    make_alert_receive_channel,
    make_alert_group,
):
    """filter_active returns only firing groups and groups silenced for a limited period."""
    org, user = make_organization_and_user()
    channel = make_alert_receive_channel(org)

    # still escalating: plain firing, and silenced by period
    firing = make_alert_group(channel)
    silenced_for_period = make_alert_group(channel)
    silenced_for_period.silence_by_user_or_backsync(user, silence_delay=1800)  # silence by period

    # escalation inactive: acked, resolved, attached to another group, silenced forever
    acked = make_alert_group(channel)
    acked.acknowledge_by_user_or_backsync(user)
    resolved = make_alert_group(channel)
    resolved.resolve_by_user_or_backsync(user)
    attached = make_alert_group(channel)
    attached.attach_by_user(user, firing)
    silenced_forever = make_alert_group(channel)
    silenced_forever.silence_by_user_or_backsync(user, silence_delay=None)  # silence forever

    active = AlertGroup.objects.filter_active()
    assert active.count() == 2
    assert firing in active
    assert silenced_for_period in active
@patch("apps.alerts.models.AlertGroup.hard_delete")
@patch("apps.alerts.models.AlertGroup.un_attach_by_delete")
@patch("apps.alerts.models.AlertGroup.stop_escalation")
@patch("apps.alerts.tasks.delete_alert_group.alert_group_action_triggered_signal")
@pytest.mark.django_db
def test_delete_by_user(
    mock_alert_group_action_triggered_signal,
    _mock_stop_escalation,
    _mock_un_attach_by_delete,
    _mock_hard_delete,
    make_organization_and_user,
    make_alert_receive_channel,
    make_alert_group,
    django_capture_on_commit_callbacks,
):
    """Walk the three-stage delete pipeline, invoking each stage by hand.

    Stage 1: ``delete_alert_group`` writes a TYPE_DELETED log record, stops
    escalation, and (on transaction commit) enqueues
    ``send_alert_group_signal_for_delete``.
    Stage 2: ``send_alert_group_signal_for_delete`` fires
    ``alert_group_action_triggered_signal`` synchronously and enqueues
    ``finish_delete_alert_group``.
    Stage 3: ``finish_delete_alert_group`` un-attaches dependents and
    hard-deletes the group. Each ``.delay``/``.apply_async`` is patched so the
    next stage can be called directly with the captured arguments.
    """
    organization, user = make_organization_and_user()
    alert_receive_channel = make_alert_receive_channel(organization)
    alert_group = make_alert_group(alert_receive_channel)
    # make a few dependent alert groups
    dependent_alert_groups = [make_alert_group(alert_receive_channel, root_alert_group=alert_group) for _ in range(3)]
    # no delete log record exists before the delete is requested
    assert alert_group.log_records.filter(type=AlertGroupLogRecord.TYPE_DELETED).count() == 0
    # Stage 1: the follow-up task must be enqueued from an on-commit hook,
    # hence django_capture_on_commit_callbacks(execute=True)
    with patch(
        "apps.alerts.tasks.delete_alert_group.send_alert_group_signal_for_delete.delay", return_value=None
    ) as mock_send_alert_group_signal:
        with django_capture_on_commit_callbacks(execute=True):
            delete_alert_group(alert_group.pk, user.pk)
    assert mock_send_alert_group_signal.call_count == 1
    assert alert_group.log_records.filter(type=AlertGroupLogRecord.TYPE_DELETED).count() == 1
    deleted_log_record = alert_group.log_records.get(type=AlertGroupLogRecord.TYPE_DELETED)
    alert_group.stop_escalation.assert_called_once_with()
    # Stage 2: replay the captured task args; the final task must be scheduled
    with patch(
        "apps.alerts.tasks.delete_alert_group.finish_delete_alert_group.apply_async", return_value=None
    ) as mock_finish_delete_alert_group:
        send_alert_group_signal_for_delete(*mock_send_alert_group_signal.call_args.args)
    assert mock_finish_delete_alert_group.call_count == 1
    # the action signal is sent synchronously (force_sync) with the delete log record
    mock_alert_group_action_triggered_signal.send.assert_called_once_with(
        sender=send_alert_group_signal_for_delete,
        log_record=deleted_log_record.pk,
        force_sync=True,
    )
    # Stage 3: hard delete the group and detach every dependent group
    finish_delete_alert_group(alert_group.pk)
    alert_group.hard_delete.assert_called_once_with()
    for dependent_alert_group in dependent_alert_groups:
        dependent_alert_group.un_attach_by_delete.assert_called_with()
@pytest.mark.django_db
def test_integration_config_on_alert_group_created(make_organization, make_alert_receive_channel):
    """The integration config hook fires once per alert group, not once per alert."""
    org = make_organization()
    channel = make_alert_receive_channel(org, grouping_id_template="group_to_one_group")

    def _receive_alert():
        # identical payloads + a constant grouping template -> all alerts land in one group
        return Alert.create(
            title="the title",
            message="the message",
            alert_receive_channel=channel,
            raw_request_data={},
            integration_unique_data={},
            image_url=None,
            link_to_upstream_details=None,
        )

    with patch.object(channel.config, "on_alert_group_created", create=True) as hook:
        _receive_alert()
        alert = _receive_alert()

    assert alert.group.alerts.count() == 2
    hook.assert_called_once_with(alert.group)
@patch.object(AlertGroup, "start_escalation_if_needed")
@pytest.mark.django_db
@pytest.mark.parametrize(
    "new_state,log_type,to_firing_log_type",
    [
        (AlertGroupState.ACKNOWLEDGED, AlertGroupLogRecord.TYPE_ACK, AlertGroupLogRecord.TYPE_UN_ACK),
        (AlertGroupState.RESOLVED, AlertGroupLogRecord.TYPE_RESOLVED, AlertGroupLogRecord.TYPE_UN_RESOLVED),
        (AlertGroupState.SILENCED, AlertGroupLogRecord.TYPE_SILENCE, AlertGroupLogRecord.TYPE_UN_SILENCE),
    ],
)
def test_update_state_by_backsync(
    mock_start_escalation_if_needed,
    new_state,
    log_type,
    to_firing_log_type,
    make_organization,
    make_alert_receive_channel,
    make_alert_group,
):
    """Backsync state updates transition the group and write author-less BACKSYNC log records."""
    organization = make_organization()
    source_channel = make_alert_receive_channel(organization)
    alert_group = make_alert_group(make_alert_receive_channel(organization))
    assert alert_group.state == AlertGroupState.FIRING

    def _assert_state_and_last_log(expected_state, expected_log_type):
        alert_group.refresh_from_db()
        assert alert_group.state == expected_state
        log = alert_group.log_records.last()
        assert log.type == expected_log_type
        # backsync records carry no author, only the source integration name
        assert log.action_source == ActionSource.BACKSYNC
        assert log.author is None
        assert log.step_specific_info == {"source_integration_name": source_channel.verbal_name}

    # transition away from firing
    alert_group.update_state_by_backsync(new_state, source_channel=source_channel)
    _assert_state_and_last_log(new_state, log_type)

    # and back to firing, which must restart escalation exactly once
    alert_group.update_state_by_backsync(AlertGroupState.FIRING, source_channel=source_channel)
    _assert_state_and_last_log(AlertGroupState.FIRING, to_firing_log_type)
    mock_start_escalation_if_needed.assert_called_once()
@pytest.mark.django_db
def test_alert_group_created_if_resolve_condition_but_auto_resolving_disabled(
    make_organization,
    make_alert_receive_channel,
    make_alert_group,
):
    """A truthy resolve condition must not route new alerts into a resolved group when source-based resolving is off."""
    organization = make_organization()
    grouping_distinction = "abcdef"
    # grouping condition will match; resolve condition evaluates to True, but auto resolving is disabled
    channel = make_alert_receive_channel(
        organization,
        grouping_id_template=grouping_distinction,
        resolve_condition_template="True",
        allow_source_based_resolving=False,
    )
    # an already-resolved alert group carrying the same grouping distinction
    resolved_group = make_alert_group(
        channel,
        resolved=True,
        distinction=hashlib.md5(grouping_distinction.encode()).hexdigest(),
    )

    # a new alert arrives for the same integration
    incoming_alert = Alert.create(
        title="the title",
        message="the message",
        alert_receive_channel=channel,
        raw_request_data={},
        integration_unique_data={},
        image_url=None,
        link_to_upstream_details=None,
    )

    # it must open a fresh group instead of re-using the resolved one
    assert incoming_alert.group != resolved_group
class TestAlertGroupSlackChannelID:
@pytest.mark.django_db
def test_slack_channel_id_with_slack_message(
self,
make_organization_with_slack_team_identity,
make_alert_receive_channel,
make_slack_channel,
make_slack_message,
make_alert_group,
):
"""
Test that slack_channel_id returns the _channel_id from slack_message when slack_message exists.
"""
organization, slack_team_identity = make_organization_with_slack_team_identity()
alert_receive_channel = make_alert_receive_channel(organization)
alert_group = make_alert_group(alert_receive_channel)
slack_channel = make_slack_channel(slack_team_identity)
slack_message = make_slack_message(slack_channel, alert_group=alert_group)
# Assert that slack_channel_id returns the channel.slack_id from slack_message
assert alert_group.slack_channel_id == slack_message.channel.slack_id
@pytest.mark.django_db
def test_slack_channel_id_with_channel_filter(
self,
make_organization_with_slack_team_identity,
make_alert_receive_channel,
make_channel_filter,
make_slack_channel,
make_alert_group,
):
"""
Test that slack_channel_id returns the slack_id from channel_filter.slack_channel_or_org_default.
"""
organization, slack_team_identity = make_organization_with_slack_team_identity()
alert_receive_channel = make_alert_receive_channel(organization)
slack_channel = make_slack_channel(slack_team_identity)
channel_filter = make_channel_filter(alert_receive_channel, slack_channel=slack_channel)
alert_group = make_alert_group(alert_receive_channel, channel_filter=channel_filter)
# Assert that slack_channel_id returns the slack_id from the channel filter's Slack channel
assert alert_group.slack_channel_id == slack_channel.slack_id
    @pytest.mark.django_db
    def test_slack_channel_id_no_slack_message_no_channel_filter(
        self,
        make_organization_with_slack_team_identity,
        make_alert_receive_channel,
        make_alert_group,
    ):
        """
        Test that slack_channel_id returns None when there is no slack_message and no channel_filter.
        """
        organization, _ = make_organization_with_slack_team_identity()
        alert_receive_channel = make_alert_receive_channel(organization)
        # explicitly no channel filter, and no SlackMessage is created for this group
        alert_group = make_alert_group(alert_receive_channel, channel_filter=None)
        # Assert that slack_channel_id is None
        assert alert_group.slack_channel_id is None