Merge branch 'dev' into new-schedules

This commit is contained in:
Maxim 2022-09-06 16:53:30 +03:00
commit 056caaa288
215 changed files with 8566 additions and 11630 deletions

View file

@ -5,7 +5,7 @@ name: Build and Release
steps:
- name: Build Plugin
image: node:14.6.0-stretch
image: node:14.17.0-stretch
commands:
- apt-get update
- apt-get --assume-yes install jq
@ -16,7 +16,7 @@ steps:
- ls ./
- name: Sign and Package Plugin
image: node:14.6.0-stretch
image: node:14.17.0-stretch
environment:
GRAFANA_API_KEY:
from_secret: gcom_plugin_publisher_api_key
@ -158,18 +158,11 @@ trigger:
---
kind: pipeline
type: docker
name: OSS Release
name: OSS plugin release
steps:
- name: Check Promote
image: alpine
commands:
- if [ -z "$DRONE_DEPLOY_TO" ]; then echo "Missing DRONE_DEPLOY_TO (Target)"; exit 1; fi
- if [ -z "$DRONE_TAG" ]; then echo "Missing DRONE_TAG"; exit 1; fi
- echo Promoting $DRONE_TAG to $DRONE_DEPLOY_TO
- name: Build Plugin
image: node:14.6.0-stretch
- name: build plugin
image: node:14.17.0-stretch
commands:
- apt-get update
- apt-get --assume-yes install jq
@ -178,23 +171,14 @@ steps:
- yarn --network-timeout 500000
- yarn build
- ls ./
depends_on:
- Check Promote
when:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
- name: Sign and Package Plugin
image: node:14.6.0-stretch
- name: sign and package plugin
image: node:14.17.0-stretch
environment:
GRAFANA_API_KEY:
from_secret: gcom_plugin_publisher_api_key
depends_on:
- Build Plugin
- build plugin
commands:
- apt-get update
- apt-get install zip
@ -206,7 +190,7 @@ steps:
- zip -r grafana-oncall-app.zip ./grafana-oncall-app
- if [ -z "$DRONE_TAG" ]; then echo "No tag, skipping archive"; else cp grafana-oncall-app.zip grafana-oncall-app-${DRONE_TAG}.zip; fi
- name: Publish Plugin to grafana.com (release)
- name: publish plugin to grafana.com (release)
image: curlimages/curl:7.73.0
environment:
GRAFANA_API_KEY:
@ -214,32 +198,36 @@ steps:
commands:
- "curl -f -s -H \"Authorization: Bearer $${GRAFANA_API_KEY}\" -d \"download[any][url]=https://storage.googleapis.com/grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip\" -d \"download[any][md5]=$$(curl -sL https://storage.googleapis.com/grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip | md5sum | cut -d' ' -f1)\" -d url=https://github.com/grafana/oncall/grafana-plugin https://grafana.com/api/plugins"
depends_on:
- Sign and Package Plugin
- sign and package plugin
- name: Image Tag
trigger:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
---
kind: pipeline
type: docker
name: OSS engine release (amd64)
platform:
os: linux
arch: amd64
steps:
- name: set engine version
image: alpine
commands:
- apk add --no-cache bash git sed
- git fetch origin --tags
- chmod +x ./tools/image-tag.sh
- echo $(./tools/image-tag.sh)
- echo $(./tools/image-tag.sh) > .tags
- apk add --no-cache bash sed
- if [ -z "$DRONE_TAG" ]; then echo "No tag, not modifying version"; else sed "0,/VERSION.*/ s/VERSION.*/VERSION = \"${DRONE_TAG}\"/g" engine/settings/base.py > engine/settings/base.temp && mv engine/settings/base.temp engine/settings/base.py; fi
- cat engine/settings/base.py | grep VERSION | head -1
depends_on:
- Check Promote
when:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
- name: Build and Push Engine Docker Image Backend to Dockerhub
- name: build and push docker image
image: plugins/docker
settings:
repo: grafana/oncall
tags: ${DRONE_TAG}-amd64-linux
dockerfile: engine/Dockerfile
context: engine/
password:
@ -247,21 +235,94 @@ steps:
username:
from_secret: docker_username
depends_on:
- Image Tag
- name: Unrecognized Promote Target
image: alpine
commands:
- echo $DRONE_DEPLOY_TO is not a recognized promote target!
- exit 1
when:
target:
exclude:
- oss
- set engine version
trigger:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
---
kind: pipeline
type: docker
name: OSS engine release (arm64)
platform:
os: linux
arch: arm64
steps:
- name: set engine version
image: alpine
commands:
- apk add --no-cache bash sed
- if [ -z "$DRONE_TAG" ]; then echo "No tag, not modifying version"; else sed "0,/VERSION.*/ s/VERSION.*/VERSION = \"${DRONE_TAG}\"/g" engine/settings/base.py > engine/settings/base.temp && mv engine/settings/base.temp engine/settings/base.py; fi
- cat engine/settings/base.py | grep VERSION | head -1
- name: build and push docker image
image: plugins/docker
settings:
repo: grafana/oncall
tags: ${DRONE_TAG}-arm64-linux
dockerfile: engine/Dockerfile
context: engine/
password:
from_secret: docker_password
username:
from_secret: docker_username
depends_on:
- set engine version
trigger:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
---
depends_on:
- OSS engine release (amd64)
- OSS engine release (arm64)
kind: pipeline
type: docker
name: manifest
steps:
- name: manifest tag
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
target: "grafana/oncall:${DRONE_TAG}"
template: "grafana/oncall:${DRONE_TAG}-ARCH-OS"
platforms:
- linux/amd64
- linux/arm64
- name: manifest latest
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
target: "grafana/oncall:latest"
template: "grafana/oncall:${DRONE_TAG}-ARCH-OS"
platforms:
- linux/amd64
- linux/arm64
trigger:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
---
# Secret for pulling docker images.
@ -334,6 +395,6 @@ kind: secret
name: drone_token
---
kind: signature
hmac: a74dd831a3d0a87b8fc1db45699a6a834ea769da9f437c55979ae665948c3b3f
hmac: 8a060649c132677ba1b5693b5ac6c846c02f9a5bb645fe990b26a7ea42a0fb66
...

View file

@ -15,7 +15,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 14
node-version: 14.17.0
- name: Build
run: |
pip install $(grep "pre-commit" engine/requirements.txt)

View file

@ -16,7 +16,7 @@ jobs:
python-version: '3.9'
- uses: actions/setup-node@v3
with:
node-version: 14
node-version: 14.17.0
- uses: snyk/actions/setup@master
- name: Install Dependencies
run: |

View file

@ -1,5 +1,44 @@
# Change Log
## v1.0.33 (2022-09-06)
- Add raw alert view
- Add GitHub star button for OSS installations
- Restore alert group search functionality
- Bug fixes
## v1.0.32 (2022-09-01)
- Bug fixes
## v1.0.31 (2022-09-01)
- Bump celery version
- Fix oss to cloud connection
## v1.0.30 (2022-08-31)
- Bug fix: check user notification policy before access
## v1.0.29 (2022-08-31)
- Add arm64 docker image
## v1.0.28 (2022-08-31)
- Bug fixes
## v1.0.27 (2022-08-30)
- Bug fixes
## v1.0.26 (2022-08-26)
- Insight log's format fixes
- Remove UserNotificationPolicy auto-recreating
## v1.0.25 (2022-08-24)
- Bug fixes
## v1.0.24 (2022-08-24)
- Insight logs
- Default DATA_UPLOAD_MAX_MEMORY_SIZE to 1mb
## v1.0.23 (2022-08-23)
- Bug fixes
## v1.0.22 (2022-08-16)
- Make STATIC_URL configurable from environment variable
@ -59,7 +98,7 @@
## 1.0.2 (2022-06-17)
- Fix Grafana Alerting integration to handle API changes in Grafana 9
- Improve public api endpoint for for outgoing webhooks (/actions) by adding ability to create, update and delete outgoing webhook instance
- Improve public api endpoint for outgoing webhooks (/actions) by adding ability to create, update and delete outgoing webhook instance
## 1.0.0 (2022-06-14)

View file

@ -1,7 +1,9 @@
* [Developer quickstart](#developer-quickstart)
* [Code style](#code-style)
* [Backend setup](#backend-setup)
* [Frontend setup](#frontend-setup)
* [Slack application setup](#slack-application-setup)
* [Update drone build](#update-drone-build)
* [Troubleshooting](#troubleshooting)
* [ld: library not found for -lssl](#ld-library-not-found-for--lssl)
* [Could not build wheels for cryptography which use PEP 517 and cannot be installed directly](#could-not-build-wheels-for-cryptography-which-use-pep-517-and-cannot-be-installed-directly)
@ -131,6 +133,22 @@ extra_hosts:
For Slack app configuration check our docs: https://grafana.com/docs/grafana-cloud/oncall/open-source/#slack-setup
### Update drone build
The .drone.yml build file must be signed when changes are made to it. Follow these steps:
If you have not installed drone CLI follow [these instructions](https://docs.drone.io/cli/install/)
To sign the .drone.yml file:
```bash
export DRONE_SERVER=https://drone.grafana.net
# Get your drone token from https://drone.grafana.net/account
export DRONE_TOKEN=<Your DRONE_TOKEN>
drone sign --save grafana/oncall .drone.yml
```
## Troubleshooting
### ld: library not found for -lssl
@ -241,18 +259,3 @@ pytest -n4
5. Create a new Django Server run configuration to Run/Debug the engine
- Use a plugin such as EnvFile to load the .env file
- Change port from 8000 to 8080
## Update drone build
The .drone.yml build file must be signed when changes are made to it. Follow these steps:
If you have not installed drone CLI follow [these instructions](https://docs.drone.io/cli/install/)
To sign the .drone.yml file:
```bash
export DRONE_SERVER=https://drone.grafana.net
# Get your drone token from https://drone.grafana.net/account
export DRONE_TOKEN=<Your DRONE_TOKEN>
drone sign --save grafana/oncall .drone.yml
```

View file

@ -53,7 +53,7 @@ services:
retries: 10
grafana:
image: "grafana/grafana:9.0.0-beta3"
image: "grafana/grafana:main"
restart: always
mem_limit: 500m
cpus: 0.5

View file

@ -48,7 +48,7 @@ services:
condition: service_healthy
grafana:
image: "grafana/grafana:9.0.0-beta3"
image: "grafana/grafana:main"
restart: always
mem_limit: 500m
cpus: 0.5
@ -65,5 +65,5 @@ services:
ports:
- 3000:3000
depends_on:
mysql-to-create-grafana-db:
mysql:
condition: service_healthy

View file

@ -16,15 +16,22 @@ weight: 300
# Telegram integration for Grafana OnCall
You can use Telegram to deliver alert group notifications to a dedicated channel, and allow users to perform notification actions.
You can manage alerts either directly in your personal Telegram DMs or in a dedicated team channel.
Each alert group notification is assigned a dedicated discussion. Users can perform notification actions (acknowledge, resolve, silence), create reports, and discuss alerts in the comments section of the discussions.
## Configure Telegram user settings in Grafana OnCall
In case an integration route is not configured to use a Telegram channel, users will receive messages with alert group contents, logs and actions in their DMs.
To receive alert group contents, escalation logs and to be able to perform actions (acknowledge, resolve, silence) in Telegram DMs, please refer to the following steps:
## Connect to Telegram
1. In your profile, find the Telegram setting and click **Connect**.
1. Click **Connect automatically** for the bot to message you and to bring up your telegram account.
1. Click **Start** when the OnCall bot messages you and wait for the connection confirmation.
1. Done! Now you can receive alerts directly to your Telegram DMs.
Connect your organization's Telegram account to your Grafana OnCall instance by following the instructions provided in OnCall. You can use the following steps as a reference.
If you want to connect manually, you can click the URL provided and then **SEND MESSAGE**. In your Telegram account, click **Start**.
## (Optional) Connect to a Telegram channel
In case you want to manage alerts in a dedicated Telegram channel, please use the following steps as a reference.
> **NOTE:** Only Grafana users with the administrator role can configure OnCall settings.
@ -42,10 +49,5 @@ Connect your organization's Telegram account to your Grafana OnCall instance by
1. In OnCall, send the provided verification code to the channel.
1. Make sure users connect to Telegram in their OnCall user profile.
## Configure Telegram user settings in OnCall
1. In your profile, find the Telegram setting and click **Connect**.
1. Click **Connect automatically** for the bot to message you and to bring up your telegram account.
1. Click **Start** when the OnCall bot messages you.
If you want to connect manually, you can click the URL provided and then **SEND MESSAGE**. In your Telegram account, click **Start**.
Each alert group is assigned a dedicated discussion. Users can perform actions (acknowledge, resolve, silence), and discuss alerts in the comments section of the discussions.
In case an integration route is not configured to use a Telegram channel, users will receive messages with alert group contents, logs and actions in their DMs.

View file

@ -25,6 +25,8 @@ There are three Grafana OnCall OSS environments available:
## Production Environment
We suggest using our official helm chart for the reliable production deployment of Grafana OnCall. It will deploy Grafana OnCall engine and celery workers, along with RabbitMQ cluster, Redis Cluster, and the database.
>**Note:** The Grafana OnCall engine currently supports one instance of the Grafana OnCall plugin at a time.
Check the [helm chart](https://github.com/grafana/oncall/tree/dev/helm/oncall) for more details.
We'll always be happy to provide assistance with production deployment in [our communities](https://github.com/grafana/oncall#join-community)!
@ -164,13 +166,11 @@ lt --port 8080 -s pretty-turkey-83 --print-requests
The Telegram integration for Grafana OnCall is designed for collaborative team work and improved incident response. Refer to the following steps to configure the Telegram integration:
1. Ensure your OnCall environment is up and running.
1. Request [BotFather](https://t.me/BotFather) for a key, then add your key in `TELEGRAM_TOKEN` in your Grafana OnCall **Env Variables**.
1. Set `TELEGRAM_WEBHOOK_HOST` with your external URL for your Grafana OnCall.
1. From the **ChatOps** tab in Grafana OnCall, click **Telegram**.
1. Ensure your Grafana OnCall environment is up and running.
2. Create a Telegram bot using [BotFather](https://t.me/BotFather) and save the token provided by BotFather. Please make sure to disable **Group Privacy** for the bot (Bot Settings -> Group Privacy -> Turn off).
3. Paste the token provided by BotFather to the `TELEGRAM_TOKEN` variable on the **Env Variables** page of your Grafana OnCall instance.
4. Set the `TELEGRAM_WEBHOOK_HOST` variable to the external address of your Grafana OnCall instance. Please note that `TELEGRAM_WEBHOOK_HOST` must start with `https://` and be publicly available (meaning that it can be reached by Telegram servers). If your host is private or local, consider using a reverse proxy (e.g. [ngrok](https://ngrok.com)).
5. Now you can connect Telegram accounts on the **Users** page and receive alert groups to Telegram direct messages. Alternatively, in case you want to connect Telegram channels to your Grafana OnCall environment, navigate to the **ChatOps** tab.
## Grafana OSS-Cloud Setup

View file

@ -23,7 +23,7 @@ class AlertAdmin(CustomModelAdmin):
@admin.register(AlertGroup)
class AlertGroupAdmin(CustomModelAdmin):
list_display = ("id", "public_primary_key", "verbose_name", "channel", "channel_filter", "state", "started_at")
list_display = ("id", "public_primary_key", "web_title_cache", "channel", "channel_filter", "state", "started_at")
list_filter = ("started_at",)
def get_queryset(self, request):

View file

@ -659,9 +659,7 @@ class IncidentLogBuilder:
# last passed step order + 1
notification_policy_order = last_user_log.notification_policy.order + 1
notification_policies = UserNotificationPolicy.objects.get_or_create_for_user(
user=user_to_notify, important=important
)
notification_policies = UserNotificationPolicy.objects.filter(user=user_to_notify, important=important)
for notification_policy in notification_policies:
future_notification = notification_policy.order >= notification_policy_order

View file

@ -69,7 +69,6 @@ class IntegrationOptionsMixin:
"grouping_id",
"resolve_condition",
"acknowledge_condition",
"group_verbose_name",
"source_link",
]

View file

@ -0,0 +1,28 @@
# Generated by Django 3.2.15 on 2022-09-01 16:54
from django.db import migrations
from apps.alerts.models import AlertReceiveChannel
from apps.alerts.tasks import update_web_title_cache_for_alert_receive_channel
def populate_web_title_cache(apps, _):
pks = AlertReceiveChannel.objects_with_deleted.values_list("pk", flat=True)
for pk in pks:
update_web_title_cache_for_alert_receive_channel.delay(pk)
class Migration(migrations.Migration):
dependencies = [
('alerts', '0006_alertgroup_alerts_aler_channel_ee84a7_idx'),
]
operations = [
migrations.RenameField(
model_name='alertgroup',
old_name='verbose_name',
new_name='web_title_cache',
),
migrations.RunPython(populate_web_title_cache, migrations.RunPython.noop),
]

View file

@ -179,26 +179,26 @@ class Alert(models.Model):
is_resolve_signal = False
is_acknowledge_signal = False
group_distinction = None
group_verbose_name = "Incident"
acknowledge_condition_template = template_manager.get_attr_template(
"acknowledge_condition", alert_receive_channel
)
resolve_condition_template = template_manager.get_attr_template("resolve_condition", alert_receive_channel)
grouping_id_template = template_manager.get_attr_template("grouping_id", alert_receive_channel)
# use get_default_attr_template because there is no ability to customize group_verbose_name, only default value
group_verbose_name_template = template_manager.get_default_attr_template(
"group_verbose_name", alert_receive_channel
)
if group_verbose_name_template is not None:
group_verbose_name, _ = apply_jinja_template(group_verbose_name_template, raw_request_data)
# set web_title_cache to web title to allow alert group searching based on web_title_cache
web_title_template = template_manager.get_attr_template("title", alert_receive_channel, render_for="web")
if web_title_template:
web_title_cache = apply_jinja_template(web_title_template, raw_request_data)[0] or None
else:
web_title_cache = None
if grouping_id_template is not None:
group_distinction, _ = apply_jinja_template(grouping_id_template, raw_request_data)
# Insert demo uuid to prevent grouping of demo alerts.
if is_demo:
group_distinction = cls.insert_demo_uuid(group_distinction)
# Insert random uuid to prevent grouping of demo alerts or alerts with group_distinction=None
if is_demo or not group_distinction:
group_distinction = cls.insert_random_uuid(group_distinction)
if group_distinction is not None:
group_distinction = hashlib.md5(str(group_distinction).encode()).hexdigest()
@ -220,11 +220,11 @@ class Alert(models.Model):
is_resolve_signal=is_resolve_signal,
is_acknowledge_signal=is_acknowledge_signal,
group_distinction=group_distinction,
group_verbose_name=group_verbose_name,
web_title_cache=web_title_cache,
)
@staticmethod
def insert_demo_uuid(distinction):
def insert_random_uuid(distinction):
if distinction is not None:
distinction += str(uuid4())
else:

View file

@ -82,7 +82,7 @@ class AlertGroupQuerySet(models.QuerySet):
# Create a new group if we couldn't group it to any existing ones
try:
return (
self.create(**search_params, is_open_for_grouping=True, verbose_name=group_data.group_verbose_name),
self.create(**search_params, is_open_for_grouping=True, web_title_cache=group_data.web_title_cache),
True,
)
except IntegrityError:
@ -134,7 +134,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
STATUS_CHOICES = ((NEW, "New"), (ACKNOWLEDGED, "Acknowledged"), (RESOLVED, "Resolved"), (SILENCED, "Silenced"))
GroupData = namedtuple(
"GroupData", ["is_resolve_signal", "group_distinction", "group_verbose_name", "is_acknowledge_signal"]
"GroupData", ["is_resolve_signal", "group_distinction", "web_title_cache", "is_acknowledge_signal"]
)
SOURCE, USER, NOT_YET, LAST_STEP, ARCHIVED, WIPED, DISABLE_MAINTENANCE = range(7)
@ -177,7 +177,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
# For example different types of alerts from the same channel should go to different groups.
# Distinction is what describes their difference.
distinction = models.CharField(max_length=100, null=True, default=None, db_index=True)
verbose_name = models.TextField(null=True, default=None)
web_title_cache = models.TextField(null=True, default=None)
inside_organization_number = models.IntegerField(default=0)
@ -357,7 +357,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
]
def __str__(self):
return f"{self.pk}: {self.verbose_name}"
return f"{self.pk}: {self.web_title_cache}"
@property
def is_maintenance_incident(self):
@ -899,13 +899,13 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.
self.resolve(resolved_by=AlertGroup.WIPED)
self.stop_escalation()
self.distinction = ""
self.verbose_name = "Wiped incident"
self.web_title_cache = None
self.wiped_at = timezone.now()
self.wiped_by = user
for alert in self.alerts.all():
alert.wipe(wiped_by=self.wiped_by, wiped_at=self.wiped_at)
self.save(update_fields=["distinction", "verbose_name", "wiped_at", "wiped_by"])
self.save(update_fields=["distinction", "web_title_cache", "wiped_at", "wiped_by"])
log_record = self.log_records.create(
type=AlertGroupLogRecord.TYPE_WIPED,

View file

@ -27,9 +27,9 @@ from apps.integrations.tasks import create_alert, create_alertmanager_alerts
from apps.slack.constants import SLACK_RATE_LIMIT_DELAY, SLACK_RATE_LIMIT_TIMEOUT
from apps.slack.tasks import post_slack_rate_limit_message
from apps.slack.utils import post_message_to_channel
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.utils import create_engine_url
from common.exceptions import TeamCanNotBeChangedError, UnableToSendDemoAlert
from common.insight_log import EntityEvent, write_resource_insight_log
from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
logger = logging.getLogger(__name__)
@ -342,66 +342,6 @@ class AlertReceiveChannel(IntegrationOptionsMixin, MaintainableObject):
self.save(update_fields=["rate_limit_message_task_id", "rate_limited_in_slack_at"])
post_slack_rate_limit_message.apply_async((self.pk,), countdown=delay, task_id=task_id)
@property
def repr_settings_for_client_side_logging(self):
"""
Example of execution:
name: Grafana :blush:, team: example, auto resolve allowed: Yes
templates:
Slack title: *<{{ grafana_oncall_link }}|#{{ grafana_oncall_id }} Custom title>* via {{ integration_name }}
{% if source_link %}
(*<{{ source_link }}|source>*)
{%- endif %},
Slack message: default,
Slack image url: default,
SMS title: default,
Phone call title: default,
Web title: default,
Web message: default,
Web image url: default,
Email title: default,
Email message: default,
Telegram title: default,
Telegram message: default,
Telegram image url: default,
Source link: default,
Grouping id: default,
Resolve condition: default,
Acknowledge condition: default
"""
result = f"name: {self.verbal_name}, team: {self.team.name if self.team else 'No team'}"
if self.is_able_to_autoresolve:
result += f", auto resolve allowed: {'Yes' if self.allow_source_based_resolving else 'No'}"
if self.integration == AlertReceiveChannel.INTEGRATION_SLACK_CHANNEL:
slack_channel = None
if self.integration_slack_channel_id:
SlackChannel = apps.get_model("slack", "SlackChannel")
slack_channel = SlackChannel.objects.filter(
slack_team_identity=self.organization.slack_team_identity,
slack_id=self.integration_slack_channel_id,
).first()
result += f", slack channel: {slack_channel.name if slack_channel else 'not selected'}"
result += (
f"\ntemplates:\nSlack title: {self.slack_title_template or 'default'},\n"
f"Slack message: {self.slack_message_template or 'default'},\n"
f"Slack image url: {self.slack_image_url_template or 'default'},\n"
f"SMS title: {self.sms_title_template or 'default'},\n"
f"Phone call title: {self.phone_call_title_template or 'default'},\n"
f"Web title: {self.web_title_template or 'default'},\n"
f"Web message: {self.web_message_template or 'default'},\n"
f"Web image url: {self.web_image_url_template or 'default'},\n"
f"Email title: {self.email_title_template or 'default'},\n"
f"Email message: {self.email_message_template or 'default'},\n"
f"Telegram title: {self.telegram_title_template or 'default'},\n"
f"Telegram message: {self.telegram_message_template or 'default'},\n"
f"Telegram image url: {self.telegram_image_url_template or 'default'},\n"
f"Source link: {self.source_link_template or 'default'},\n"
f"Grouping id: {self.grouping_id_template or 'default'},\n"
f"Resolve condition: {self.resolve_condition_template or 'default'},\n"
f"Acknowledge condition: {self.acknowledge_condition_template or 'default'}"
)
return result
@property
def alert_groups_count(self):
return self.alert_groups.count()
@ -658,6 +598,55 @@ class AlertReceiveChannel(IntegrationOptionsMixin, MaintainableObject):
AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING,
)
# Insight logs
@property
def insight_logs_type_verbal(self):
return "integration"
@property
def insight_logs_verbal(self):
return self.verbal_name
@property
def insight_logs_serialized(self):
result = {
"name": self.verbal_name,
"allow_source_based_resolving": self.allow_source_based_resolving,
"slack_title": self.slack_title_template or "default",
"slack_message": self.slack_message_template or "default",
"slack_image_url": self.slack_image_url_template or "default",
"sms_title": self.sms_title_template or "default",
"phone_call_title": self.phone_call_title_template or "default",
"web_title": self.web_title_template or "default",
"web_message": self.web_message_template or "default",
"web_image_url_template": self.web_image_url_template or "default",
"email_title_template": self.email_title_template or "default",
"email_message": self.email_message_template or "default",
"telegram_title": self.telegram_title_template or "default",
"telegram_message": self.telegram_message_template or "default",
"telegram_image_url": self.telegram_image_url_template or "default",
"source_link": self.source_link_template or "default",
"grouping_id": self.grouping_id_template or "default",
"resolve_condition": self.resolve_condition_template or "default",
"acknowledge_condition": self.acknowledge_condition_template or "default",
}
if self.team:
result["team"] = self.team.name
result["team_id"] = self.team.public_primary_key
else:
result["team"] = "General"
return result
@property
def insight_logs_metadata(self):
result = {}
if self.team:
result["team"] = self.team.name
result["team_id"] = self.team.public_primary_key
else:
result["team"] = "General"
return result
@receiver(post_save, sender=AlertReceiveChannel)
def listen_for_alertreceivechannel_model_save(sender, instance, created, *args, **kwargs):
@ -665,30 +654,15 @@ def listen_for_alertreceivechannel_model_save(sender, instance, created, *args,
IntegrationHeartBeat = apps.get_model("heartbeat", "IntegrationHeartBeat")
if created:
description = f"New integration {instance.verbal_name} was created"
create_organization_log(
instance.organization,
instance.author,
type=OrganizationLogType.TYPE_INTEGRATION_CREATED,
description=description,
)
write_resource_insight_log(instance=instance, author=instance.author, event=EntityEvent.CREATED)
default_filter = ChannelFilter(alert_receive_channel=instance, filtering_term=None, is_default=True)
default_filter.save()
filter_verbal = default_filter.verbal_name_for_clients.capitalize()
description = f"{filter_verbal} was created for integration {instance.verbal_name}"
create_organization_log(
instance.organization,
None,
OrganizationLogType.TYPE_CHANNEL_FILTER_CREATED,
description,
)
write_resource_insight_log(instance=default_filter, author=instance.author, event=EntityEvent.CREATED)
TEN_MINUTES = 600 # this is timeout for cloud heartbeats
if instance.is_available_for_integration_heartbeat:
IntegrationHeartBeat.objects.create(alert_receive_channel=instance, timeout_seconds=TEN_MINUTES)
description = f"Heartbeat for integration {instance.verbal_name} was created"
create_organization_log(
instance.organization, None, OrganizationLogType.TYPE_HEARTBEAT_CREATED, description
)
heartbeat = IntegrationHeartBeat.objects.create(alert_receive_channel=instance, timeout_seconds=TEN_MINUTES)
write_resource_insight_log(instance=heartbeat, author=instance.author, event=EntityEvent.CREATED)
if instance.integration == AlertReceiveChannel.INTEGRATION_GRAFANA_ALERTING:
if created:

View file

@ -129,45 +129,57 @@ class ChannelFilter(OrderedModel):
else:
return self.slack_channel_id
@property
def repr_settings_for_client_side_logging(self):
"""
Example of execution:
term: .*, order: 0, slack notification allowed: Yes, telegram notification allowed: Yes,
slack channel: without_amixr_general_channel, telegram channel: default
"""
result = (
f"term: {self.str_for_clients}, order: {self.order}, slack notification allowed: "
f"{'Yes' if self.notify_in_slack else 'No'}, telegram notification allowed: "
f"{'Yes' if self.notify_in_telegram else 'No'}"
)
if self.notification_backends:
for backend_id, backend in self.notification_backends.items():
result += f", {backend_id} notification allowed: {'Yes' if backend.get('enabled') else 'No'}"
slack_channel = None
if self.slack_channel_id:
SlackChannel = apps.get_model("slack", "SlackChannel")
sti = self.alert_receive_channel.organization.slack_team_identity
slack_channel = SlackChannel.objects.filter(slack_team_identity=sti, slack_id=self.slack_channel_id).first()
result += f", slack channel: {slack_channel.name if slack_channel else 'default'}"
result += f", telegram channel: {self.telegram_channel.channel_name if self.telegram_channel else 'default'}"
if self.notification_backends:
for backend_id, backend in self.notification_backends.items():
channel = backend.get("channel_id") or "default"
result += f", {backend_id} channel: {channel}"
result += f", escalation chain: {self.escalation_chain.name if self.escalation_chain else 'not selected'}"
return result
@property
def str_for_clients(self):
if self.filtering_term is None:
return "default"
return str(self.filtering_term).replace("`", "")
@property
def verbal_name_for_clients(self):
return "default route" if self.is_default else f"route `{self.str_for_clients}`"
def send_demo_alert(self):
integration = self.alert_receive_channel
integration.send_demo_alert(force_route_id=self.pk)
# Insight logs
@property
def insight_logs_type_verbal(self):
return "route"
@property
def insight_logs_verbal(self):
return f"{self.str_for_clients} for {self.alert_receive_channel.insight_logs_verbal}"
@property
def insight_logs_serialized(self):
result = {
"filtering_term": self.str_for_clients,
"order": self.order,
"slack_notification_enabled": self.notify_in_slack,
"telegram_notification_enabled": self.notify_in_telegram,
# TODO: use names instead of pks, it's needed to rework messaging backends for that
}
# TODO: use names instead of pks, it's needed to rework messaging backends for that
if self.slack_channel_id:
if self.slack_channel_id:
SlackChannel = apps.get_model("slack", "SlackChannel")
sti = self.alert_receive_channel.organization.slack_team_identity
slack_channel = SlackChannel.objects.filter(
slack_team_identity=sti, slack_id=self.slack_channel_id
).first()
result["slack_channel"] = slack_channel.name
if self.telegram_channel:
result["telegram_channel"] = self.telegram_channel.public_primary_key
if self.escalation_chain:
result["escalation_chain"] = self.escalation_chain.insight_logs_verbal
result["escalation_chain_id"] = self.escalation_chain.public_primary_key
if self.notification_backends:
for backend_id, backend in self.notification_backends.items():
channel = backend.get("channel_id") or "default"
result[backend_id] = channel
return result
@property
def insight_logs_metadata(self):
return {
"integration": self.alert_receive_channel.insight_logs_verbal,
"integration_id": self.alert_receive_channel.public_primary_key,
}

View file

@ -94,19 +94,6 @@ class CustomButton(models.Model):
def hard_delete(self):
super().delete()
@property
def repr_settings_for_client_side_logging(self):
    """
    Human-readable dump of this webhook's settings for client-side logging.

    Example of execution:
    name: example, team: example, webhook: https://example.com, user: None, password: None,
    authorization header: None, data: None
    """
    # NOTE(review): this string includes password and authorization header values —
    # verify that consumers of this log output redact them.
    return (
        f"name: {self.name}, team: {self.team.name if self.team else 'No team'}, webhook: {self.webhook}, "
        f"user: {self.user}, password: {self.password}, authorization header: {self.authorization_header}, "
        f"data: {self.data}, forward_whole_payload {self.forward_whole_payload}"
    )
def build_post_kwargs(self, alert):
post_kwargs = {}
if self.user and self.password:
@ -148,6 +135,44 @@ class CustomButton(models.Model):
"""
return json.dumps(string)[1:-1]
# Insight logs
@property
def insight_logs_type_verbal(self):
    """Entity type label recorded in insight log entries for this outgoing webhook."""
    return "outgoing_webhook"
@property
def insight_logs_verbal(self):
    """Display name used for this webhook in insight logs."""
    return self.name
@property
def insight_logs_serialized(self):
    """Serialized webhook settings for insight logs.

    NOTE(review): password and authorization_header are included verbatim —
    confirm the insight log pipeline redacts these secrets.
    """
    serialized = {
        "name": self.name,
        "webhook": self.webhook,
        "user": self.user,
        "password": self.password,
        "authorization_header": self.authorization_header,
        "data": self.data,
        "forward_whole_payload": self.forward_whole_payload,
    }
    if self.team:
        serialized["team"] = self.team.name
        serialized["team_id"] = self.team.public_primary_key
    else:
        # Webhooks without a team are reported under the default "General" team.
        serialized["team"] = "General"
    return serialized
@property
def insight_logs_metadata(self):
    """Team metadata attached to insight log entries for this webhook."""
    if self.team:
        return {"team": self.team.name, "team_id": self.team.public_primary_key}
    # No team set — report the default "General" team (no id available).
    return {"team": "General"}
class EscapeDoubleQuotesDict(dict):
"""

View file

@ -46,10 +46,6 @@ class EscalationChain(models.Model):
def __str__(self):
    """Primary key and name, e.g. "42: Critical"."""
    return "{}: {}".format(self.pk, self.name)
@property
def repr_settings_for_client_side_logging(self):
    """Human-readable summary (name and team) of this escalation chain for client-side logging."""
    return f"name: {self.name}, team: {self.team.name if self.team else 'No team'}"
def make_copy(self, copy_name: str):
with transaction.atomic():
copied_chain = EscalationChain.objects.create(
@ -68,3 +64,35 @@ class EscalationChain(models.Model):
escalation_policy.save()
escalation_policy.notify_to_users_queue.set(notify_to_users_queue)
return copied_chain
# Insight logs
@property
def insight_logs_type_verbal(self):
    """Entity type label recorded in insight log entries for this escalation chain."""
    return "escalation_chain"
@property
def insight_logs_verbal(self):
    """Display name used for this escalation chain in insight logs."""
    return self.name
@property
def insight_logs_serialized(self):
    """Serialized escalation chain settings (name and team) for insight logs."""
    if self.team:
        return {
            "name": self.name,
            "team": self.team.name,
            "team_id": self.team.public_primary_key,
        }
    # Chains without a team are reported under the default "General" team.
    return {"name": self.name, "team": "General"}
@property
def insight_logs_metadata(self):
    """Team metadata attached to insight log entries for this escalation chain."""
    if self.team:
        return {"team": self.team.name, "team_id": self.team.public_primary_key}
    # No team set — report the default "General" team (no id available).
    return {"team": "General"}

View file

@ -299,47 +299,6 @@ class EscalationPolicy(OrderedModel):
def step_type_verbal(self):
    """Display label for this policy's step from STEP_CHOICES, or "Empty" when no step is set."""
    return self.STEP_CHOICES[self.step][1] if self.step is not None else "Empty"
@property
def repr_settings_for_client_side_logging(self):
    """
    Human-readable summary of this escalation step for client-side logging.

    Example of execution:
    step: 'Notify multiple Users', order: 0, important: No, users: Alex, Bob
    Another example:
    step: 'Continue escalation only if time is from', order: 4, from time: 09:40:00 (UTC), to time: 15:40:00 (UTC)
    """
    result = f"step: '{self.step_type_verbal}', order: {self.order}"
    # "important" flag only makes sense for steps that have an important variant.
    if self.step not in EscalationPolicy.STEPS_WITH_NO_IMPORTANT_VERSION_SET:
        result += f", important: {'Yes' if self.step in EscalationPolicy.IMPORTANT_STEPS_SET else 'No'}"
    # Each step type appends only the settings relevant to it.
    if self.step == EscalationPolicy.STEP_WAIT:
        result += f", wait: {self.get_wait_delay_display() if self.wait_delay else 'default'}"
    elif self.step in [EscalationPolicy.STEP_NOTIFY_GROUP, EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT]:
        result += f", user group: {self.notify_to_group.name if self.notify_to_group else 'not selected'}"
    elif self.step in [EscalationPolicy.STEP_NOTIFY_SCHEDULE, EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT]:
        result += f", on-call schedule: {self.notify_schedule.name if self.notify_schedule else 'not selected'}"
    elif self.step == EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON:
        result += f", action: {self.custom_button_trigger.name if self.custom_button_trigger else 'not selected'}"
    elif self.step in [
        EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
        EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
        EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
    ]:
        if self.notify_to_users_queue:
            users_verbal = ", ".join([user.username for user in self.sorted_users_queue])
        else:
            users_verbal = "not selected"
        result += f", users: {users_verbal}"
    elif self.step == EscalationPolicy.STEP_NOTIFY_IF_TIME:
        # NOTE(review): times are rendered with a "(UTC)" suffix — assumes stored values are UTC; confirm.
        if self.from_time:
            from_time_verbal = self.from_time.isoformat() + " (UTC)"
        else:
            from_time_verbal = "not selected"
        if self.to_time:
            to_time_verbal = self.to_time.isoformat() + " (UTC)"
        else:
            to_time_verbal = "not selected"
        result += f", from time: {from_time_verbal}, to time: {to_time_verbal}"
    return result
@property
def sorted_users_queue(self):
    """Users in the notification queue, ordered by username then pk."""
    def sort_key(user):
        # Treat a missing username as empty string so None never reaches comparison.
        return (user.username or "", user.pk)

    return sorted(self.notify_to_users_queue.all(), key=sort_key)
@ -359,3 +318,57 @@ class EscalationPolicy(OrderedModel):
step_name = step_choice[1]
break
return step_name
# Insight logs
@property
def insight_logs_type_verbal(self):
    """Entity type label recorded in insight log entries for this escalation policy."""
    return "escalation_policy"
@property
def insight_logs_verbal(self):
    """Human-readable identifier of this policy within its escalation chain."""
    chain_verbal = self.escalation_chain.insight_logs_verbal
    return "Escalation Policy {} in {}".format(self.order, chain_verbal)
@property
def insight_logs_serialized(self):
    """Serialized step settings for insight logs; only fields relevant to the step's type are included."""
    result = {
        "type": self.step_type_verbal,
        "order": self.order,
    }
    # Each step type contributes only its own settings; unset related objects are omitted.
    if self.step == EscalationPolicy.STEP_WAIT:
        if self.wait_delay:
            result["wait_delay"] = self.get_wait_delay_display()
    elif self.step in [EscalationPolicy.STEP_NOTIFY_GROUP, EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT]:
        if self.notify_to_group:
            result["user_group"] = self.notify_to_group.name
            result["user_group_id"] = self.notify_to_group.public_primary_key
    elif self.step in [EscalationPolicy.STEP_NOTIFY_SCHEDULE, EscalationPolicy.STEP_NOTIFY_SCHEDULE_IMPORTANT]:
        if self.notify_schedule:
            result["on-call_schedule"] = self.notify_schedule.insight_logs_verbal
            result["on-call_schedule_id"] = self.notify_schedule.public_primary_key
    elif self.step == EscalationPolicy.STEP_TRIGGER_CUSTOM_BUTTON:
        if self.custom_button_trigger:
            result["outgoing_webhook"] = self.custom_button_trigger.insight_logs_verbal
            result["outgoing_webhook_id"] = self.custom_button_trigger.public_primary_key
    elif self.step in [
        EscalationPolicy.STEP_NOTIFY_USERS_QUEUE,
        EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS,
        EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS_IMPORTANT,
    ]:
        if self.notify_to_users_queue:
            result["notify_users"] = [user.username for user in self.sorted_users_queue]
            result["notify_users_ids"] = [user.public_primary_key for user in self.sorted_users_queue]
    elif self.step == EscalationPolicy.STEP_NOTIFY_IF_TIME:
        # NOTE(review): times are serialized with a "(UTC)" suffix — assumes stored values are UTC; confirm.
        if self.from_time:
            result["from_time"] = self.from_time.isoformat() + " (UTC)"
        if self.to_time:
            result["to_time"] = self.to_time.isoformat() + " (UTC)"
    return result
@property
def insight_logs_metadata(self):
    """Metadata tying insight log entries for this policy to its escalation chain."""
    chain = self.escalation_chain
    return {
        "escalation_chain": chain.insight_logs_verbal,
        "escalation_chain_id": chain.public_primary_key,
    }

View file

@ -7,8 +7,8 @@ from django.db import models, transaction
from django.utils import timezone
from apps.slack.scenarios.scenario_step import ScenarioStep
from apps.user_management.organization_log_creator import create_organization_log
from common.exceptions import MaintenanceCouldNotBeStartedError
from common.insight_log import MaintenanceEvent, write_maintenance_insight_log
class MaintainableObject(models.Model):
@ -82,7 +82,6 @@ class MaintainableObject(models.Model):
AlertGroup = apps.get_model("alerts", "AlertGroup")
AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
Alert = apps.get_model("alerts", "Alert")
OrganizationLogRecord = apps.get_model("base", "OrganizationLogRecord")
with transaction.atomic():
_self = self.__class__.objects.select_for_update().get(pk=self.pk)
@ -105,6 +104,7 @@ class MaintainableObject(models.Model):
organization=organization,
team=team,
integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE,
author=user,
)
maintenance_uuid = _self.start_disable_maintenance_task(maintenance_duration)
@ -131,7 +131,7 @@ class MaintainableObject(models.Model):
if mode == AlertReceiveChannel.MAINTENANCE:
group = AlertGroup.all_objects.create(
distinction=uuid4(),
verbose_name=f"Maintenance of {verbal} for {maintenance_duration}",
web_title_cache=f"Maintenance of {verbal} for {maintenance_duration}",
maintenance_uuid=maintenance_uuid,
channel_filter_id=maintenance_integration.default_channel_filter.pk,
channel=maintenance_integration,
@ -152,11 +152,7 @@ class MaintainableObject(models.Model):
},
)
alert.save()
# create team log
log_type, object_verbal = OrganizationLogRecord.get_log_type_and_maintainable_object_verbal(self, mode, verbal)
description = f"{self.get_maintenance_mode_display()} of {object_verbal} started for {duration_verbal}"
create_organization_log(organization, user, log_type, description)
write_maintenance_insight_log(self, user, MaintenanceEvent.STARTED)
if mode == AlertReceiveChannel.MAINTENANCE:
self.send_maintenance_incident(organization, group, alert)
self.notify_about_maintenance_action(

View file

@ -1,5 +1,8 @@
from .acknowledge_reminder import acknowledge_reminder_task # noqa: F401
from .cache_alert_group_for_web import cache_alert_group_for_web, schedule_cache_for_alert_group # noqa: F401
from .alert_group_web_title_cache import ( # noqa:F401
update_web_title_cache,
update_web_title_cache_for_alert_receive_channel,
)
from .calculcate_escalation_finish_time import calculate_escalation_finish_time # noqa
from .call_ack_url import call_ack_url # noqa: F401
from .check_escalation_finished import check_escalation_finished_task # noqa: F401
@ -9,7 +12,6 @@ from .custom_button_result import custom_button_result # noqa: F401
from .delete_alert_group import delete_alert_group # noqa: F401
from .distribute_alert import distribute_alert # noqa: F401
from .escalate_alert_group import escalate_alert_group # noqa: F401
from .invalidate_web_cache_for_alert_group import invalidate_web_cache_for_alert_group # noqa: F401, todo: remove
from .invite_user_to_join_incident import invite_user_to_join_incident # noqa: F401
from .maintenance import disable_maintenance # noqa: F401
from .notify_all import notify_all_task # noqa: F401
@ -17,7 +19,6 @@ from .notify_group import notify_group_task # noqa: F401
from .notify_ical_schedule_shift import notify_ical_schedule_shift # noqa: F401
from .notify_user import notify_user_task # noqa: F401
from .resolve_alert_group_by_source_if_needed import resolve_alert_group_by_source_if_needed # noqa: F401
from .resolve_alert_group_if_needed import resolve_alert_group_if_needed # noqa: F401
from .resolve_by_last_step import resolve_by_last_step_task # noqa: F401
from .send_alert_group_signal import send_alert_group_signal # noqa: F401
from .send_update_log_report_signal import send_update_log_report_signal # noqa: F401

View file

@ -0,0 +1,87 @@
from django.db.models import Min
from apps.alerts.incident_appearance.templaters import TemplateLoader
from apps.alerts.tasks.task_logger import task_logger
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
from common.jinja_templater import apply_jinja_template
# BATCH_SIZE is how many alert groups will be processed per second (for every individual alert receive channel)
BATCH_SIZE = 1000


def batch_ids(queryset, cursor):
    """Return up to BATCH_SIZE ids from queryset that are strictly greater than cursor, in ascending order."""
    ids = queryset.filter(id__gt=cursor).order_by("id").values_list("id", flat=True)
    return list(ids[:BATCH_SIZE])
@shared_dedicated_queue_retry_task
def update_web_title_cache_for_alert_receive_channel(alert_receive_channel_pk):
    """
    Update the web_title_cache field for all alert groups of alert receive channel with pk = alert_receive_channel_pk.
    Note that it's not invoked on web title template change due to performance considerations.
    """
    task_logger.debug(
        f"Starting update_web_title_cache_for_alert_receive_channel, alert_receive_channel_pk: {alert_receive_channel_pk}"
    )

    from apps.alerts.models import AlertGroup

    queryset = AlertGroup.all_objects.filter(channel_id=alert_receive_channel_pk)
    countdown = 0
    cursor = 0
    while True:
        ids = batch_ids(queryset, cursor)
        if not ids:
            break
        # Stagger batches one second apart so each channel is throttled to BATCH_SIZE groups per second.
        update_web_title_cache.apply_async((alert_receive_channel_pk, ids), countdown=countdown)
        cursor = ids[-1]
        countdown += 1
@shared_dedicated_queue_retry_task
def update_web_title_cache(alert_receive_channel_pk, alert_group_pks):
    """
    Update the web_title_cache field for alert groups with pk in alert_group_pks,
    for alert receive channel with pk = alert_receive_channel_pk.
    """
    task_logger.debug(
        f"Starting update_web_title_cache, alert_receive_channel_pk: {alert_receive_channel_pk}, "
        f"first alert_group_pk: {alert_group_pks[0]}, last alert_group_pk: {alert_group_pks[-1]}"
    )

    from apps.alerts.models import Alert, AlertGroup, AlertReceiveChannel

    try:
        # objects_with_deleted — presumably so groups of a soft-deleted channel still get refreshed; confirm.
        alert_receive_channel = AlertReceiveChannel.objects_with_deleted.get(pk=alert_receive_channel_pk)
    except AlertReceiveChannel.DoesNotExist:
        task_logger.warning(f"AlertReceiveChannel {alert_receive_channel_pk} doesn't exist")
        return

    # only("pk"): web_title_cache is the single field written back below, so skip loading the rest.
    alert_groups = AlertGroup.all_objects.filter(pk__in=alert_group_pks).only("pk")

    # get first alerts in 2 SQL queries
    alerts_info = (
        Alert.objects.values("group_id").filter(group_id__in=alert_group_pks).annotate(first_alert_id=Min("id"))
    )
    alerts_info_map = {info["group_id"]: info for info in alerts_info}
    first_alert_ids = [info["first_alert_id"] for info in alerts_info_map.values()]
    first_alerts = Alert.objects.filter(pk__in=first_alert_ids).values("group_id", "raw_request_data")
    # group_id -> {"group_id", "raw_request_data"} of that group's earliest alert.
    first_alert_map = {alert["group_id"]: alert for alert in first_alerts}

    template_manager = TemplateLoader()
    web_title_template = template_manager.get_attr_template("title", alert_receive_channel, render_for="web")

    for alert_group in alert_groups:
        if web_title_template:
            if alert_group.pk in first_alert_map:
                raw_request_data = first_alert_map[alert_group.pk]["raw_request_data"]
                # Take the rendered string (first element of apply_jinja_template's result); empty render becomes None.
                web_title_cache = apply_jinja_template(web_title_template, raw_request_data)[0] or None
            else:
                # Group has no alerts — nothing to render.
                web_title_cache = None
        else:
            # No custom web title template configured: clear the cache.
            web_title_cache = None
        alert_group.web_title_cache = web_title_cache

    # Single bulk UPDATE for the whole batch.
    AlertGroup.all_objects.bulk_update(alert_groups, ["web_title_cache"])

View file

@ -1,19 +0,0 @@
from django.conf import settings
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
@shared_dedicated_queue_retry_task(
    autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
)
def schedule_cache_for_alert_group(alert_group_pk):
    # todo: remove
    # No-op stub — presumably kept so tasks already queued under this name
    # resolve cleanly during rollout; confirm the queue is drained before deleting.
    pass
@shared_dedicated_queue_retry_task(
    autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
)
def cache_alert_group_for_web(alert_group_pk):
    # todo: remove
    # No-op stub — presumably kept so tasks already queued under this name
    # resolve cleanly during rollout; confirm the queue is drained before deleting.
    pass

View file

@ -30,13 +30,34 @@ def call_ack_url(ack_url, alert_group_pk, channel, http_method="GET"):
else None
)
text = "{}".format(debug_message)
footer = "{}".format(info_message)
blocks = [
{
"type": "section",
"block_id": "alert",
"text": {
"type": "mrkdwn",
"text": text,
},
},
{"type": "divider"},
{
"type": "section",
"block_id": "alert",
"text": {
"type": "mrkdwn",
"text": footer,
},
},
]
if channel is not None:
result = sc.api_call(
"chat.postMessage",
channel=channel,
attachments=[
{"callback_id": "alert", "text": "{}".format(debug_message), "footer": "{}".format(info_message)},
],
text=text,
blocks=blocks,
thread_ts=alert_group.slack_message.slack_id,
mrkdwn=True,
)

View file

@ -48,11 +48,9 @@ def custom_button_result(custom_button_pk, alert_group_pk, user_pk=None, escalat
except TemplateError:
is_request_successful = False
result_message = "Template error"
except json.JSONDecodeError as e:
task_logger.error(
f"Failed to send build_post_kwargs for alert_group {alert_group_pk}, " f"custom_button {custom_button_pk}"
)
raise e
except json.JSONDecodeError:
is_request_successful = False
result_message = "JSON decoding error"
else:
is_request_successful, result_message = request_outgoing_webhook(
custom_button.webhook, "POST", post_kwargs=post_kwargs

View file

@ -1,11 +0,0 @@
from django.conf import settings
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
@shared_dedicated_queue_retry_task(
    autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
)
def invalidate_web_cache_for_alert_group(org_pk=None, channel_pk=None, alert_group_pk=None, alert_group_pks=None):
    # todo: remove
    # No-op stub — presumably kept so tasks already queued under this name
    # resolve cleanly during rollout; confirm the queue is drained before deleting.
    pass

View file

@ -4,8 +4,8 @@ from django.db import transaction
from django.db.models import ExpressionWrapper, F, fields
from django.utils import timezone
from apps.user_management.organization_log_creator import create_organization_log
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
from common.insight_log import MaintenanceEvent, write_maintenance_insight_log
from .task_logger import task_logger
@ -15,7 +15,6 @@ from .task_logger import task_logger
)
def disable_maintenance(*args, **kwargs):
AlertGroup = apps.get_model("alerts", "AlertGroup")
OrganizationLogRecord = apps.get_model("base", "OrganizationLogRecord")
User = apps.get_model("user_management", "User")
Organization = apps.get_model("user_management", "Organization")
user = None
@ -25,7 +24,6 @@ def disable_maintenance(*args, **kwargs):
user = User.objects.get(pk=user_id)
force = kwargs.get("force", False)
with transaction.atomic():
if "alert_receive_channel_id" in kwargs:
AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
@ -52,23 +50,8 @@ def disable_maintenance(*args, **kwargs):
if object_under_maintenance is not None and (
disable_maintenance.request.id == object_under_maintenance.maintenance_uuid or force
):
verbal = object_under_maintenance.get_verbal()
log_type, object_verbal = OrganizationLogRecord.get_log_type_and_maintainable_object_verbal(
object_under_maintenance,
object_under_maintenance.maintenance_mode,
verbal,
stopped=True,
)
description = (
f"{object_under_maintenance.get_maintenance_mode_display()} of {object_verbal} "
f"stopped{' by user' if user else ''}"
)
organization = (
object_under_maintenance
if isinstance(object_under_maintenance, Organization)
else object_under_maintenance.organization
)
create_organization_log(organization, user, log_type, description)
organization = object_under_maintenance.get_organization()
write_maintenance_insight_log(object_under_maintenance, user, MaintenanceEvent.FINISHED)
if object_under_maintenance.maintenance_mode == object_under_maintenance.MAINTENANCE:
mode_verbal = "Maintenance"
maintenance_incident = AlertGroup.all_objects.get(
@ -82,7 +65,7 @@ def disable_maintenance(*args, **kwargs):
if organization.slack_team_identity:
transaction.on_commit(
lambda: object_under_maintenance.notify_about_maintenance_action(
f"{mode_verbal} of {verbal} finished."
f"{mode_verbal} of {object_under_maintenance.get_verbal()} finished."
)
)

View file

@ -58,16 +58,20 @@ def notify_group_task(alert_group_pk, escalation_policy_snapshot_order=None):
if not user.is_notification_allowed:
continue
notification_policies = UserNotificationPolicy.objects.get_or_create_for_user(
notification_policies = UserNotificationPolicy.objects.filter(
user=user,
important=escalation_policy_step == EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT,
)
usergroup_notification_plan += "\n_{} (".format(
step.get_user_notification_message_for_thread_for_usergroup(user, notification_policies.first())
)
if notification_policies:
usergroup_notification_plan += "\n_{} (".format(
step.get_user_notification_message_for_thread_for_usergroup(user, notification_policies.first())
)
notification_channels = []
if notification_policies.filter(step=UserNotificationPolicy.Step.NOTIFY).count() == 0:
usergroup_notification_plan += "Empty notifications"
for notification_policy in notification_policies:
if notification_policy.step == UserNotificationPolicy.Step.NOTIFY:
notification_channels.append(

View file

@ -73,9 +73,12 @@ def notify_user_task(
user_has_notification = UserHasNotification.objects.filter(pk=user_has_notification.pk).select_for_update()[0]
if previous_notification_policy_pk is None:
notification_policy = UserNotificationPolicy.objects.get_or_create_for_user(
user=user, important=important
).first()
notification_policy = UserNotificationPolicy.objects.filter(user=user, important=important).first()
if notification_policy is None:
task_logger.info(
f"notify_user_task: Failed to notify. No notification policies. user_id={user_pk} alert_group_id={alert_group_pk} important={important}"
)
return
# Here we collect a brief overview of notification steps configured for user to send it to thread.
collected_steps_ids = []
next_notification_policy = notification_policy.next()

View file

@ -1,31 +0,0 @@
# TODO: remove this file when all the resolve_alert_group_if_needed are processed
# New version - apps.alerts.tasks.resolve_alert_group_by_source_if_needed.resolve_alert_group_by_source_if_needed
from django.apps import apps
from django.conf import settings
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
@shared_dedicated_queue_retry_task(
    autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
)
def resolve_alert_group_if_needed(alert_id):
    """
    The purpose of this task is to avoid computation-heavy check after each alert.
    Should be delayed and invoked only for the last one.
    """
    AlertGroupForAlertManager = apps.get_model("alerts", "AlertGroupForAlertManager")
    AlertForAlertManager = apps.get_model("alerts", "AlertForAlertManager")

    alert = AlertForAlertManager.objects.get(pk=alert_id)
    # Only the most recently scheduled calculation (whose celery id matches the
    # group's active_resolve_calculation_id) may proceed; stale duplicates bail out.
    if not resolve_alert_group_if_needed.request.id == alert.group.active_resolve_calculation_id:
        return "Resolve calculation celery ID mismatch. Duplication or non-active. Active: {}".format(
            alert.group.active_resolve_calculation_id
        )
    else:
        # Retrieving group again to have an access to child class methods
        alert_group = AlertGroupForAlertManager.all_objects.get(pk=alert.group_id)
        if alert_group.is_alert_a_resolve_signal(alert):
            alert_group.resolve_by_source()
        # NOTE(review): this returns "resolved ..." even when the alert was NOT a
        # resolve signal and nothing happened — misleading but harmless status text.
        return f"resolved alert_group {alert_group.pk}"

View file

@ -33,29 +33,6 @@ def test_channel_filter_select_filter(make_organization, make_alert_receive_chan
assert satisfied_filter == channel_filter
@pytest.mark.django_db
def test_channel_filter_notification_backends_repr(make_organization, make_alert_receive_channel, make_channel_filter):
    """repr_settings_for_client_side_logging reflects an extra backend's enabled state and channel id."""
    organization = make_organization()
    alert_receive_channel = make_alert_receive_channel(organization)

    # extra backend is enabled
    channel_filter = make_channel_filter(
        alert_receive_channel,
        notification_backends={"BACKEND": {"channel_id": "foobar", "enabled": True}},
    )
    assert "BACKEND notification allowed: Yes" in channel_filter.repr_settings_for_client_side_logging
    assert "BACKEND channel: foobar" in channel_filter.repr_settings_for_client_side_logging

    # backend is disabled
    channel_filter_disabled_backend = make_channel_filter(
        alert_receive_channel,
        notification_backends={"BACKEND": {"channel_id": "foobar", "enabled": False}},
    )
    assert "BACKEND notification allowed: No" in channel_filter_disabled_backend.repr_settings_for_client_side_logging
    assert "BACKEND channel: foobar" in channel_filter_disabled_backend.repr_settings_for_client_side_logging
@mock.patch("apps.integrations.tasks.create_alert.apply_async", return_value=None)
@pytest.mark.django_db
def test_send_demo_alert(

View file

@ -92,7 +92,6 @@ def test_render_group_data_templates(
assert group_data.group_distinction == template_module.tests.get("group_distinction")
assert group_data.is_resolve_signal == template_module.tests.get("is_resolve_signal")
assert group_data.is_acknowledge_signal == template_module.tests.get("is_acknowledge_signal")
assert group_data.group_verbose_name == template_module.tests.get("group_verbose_name")
def test_default_templates_are_valid():

View file

@ -22,7 +22,7 @@ def test_start_maintenance_integration(
organization, user = maintenance_test_setup
alert_receive_channel = make_alert_receive_channel(
organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, author=user
)
mode = AlertReceiveChannel.MAINTENANCE
duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
@ -43,11 +43,13 @@ def test_start_maintenance_integration_multiple_previous_instances(
organization, user = maintenance_test_setup
alert_receive_channel = make_alert_receive_channel(
organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, author=user
)
# 2 maintenance integrations were created in the past
for i in range(2):
AlertReceiveChannel.create(organization=organization, integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE)
AlertReceiveChannel.create(
organization=organization, integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE, author=user
)
mode = AlertReceiveChannel.MAINTENANCE
duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds
@ -68,7 +70,7 @@ def test_maintenance_integration_will_not_start_twice(
organization, user = maintenance_test_setup
alert_receive_channel = make_alert_receive_channel(
organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA
organization, integration=AlertReceiveChannel.INTEGRATION_GRAFANA, author=user
)
mode = AlertReceiveChannel.MAINTENANCE
duration = AlertReceiveChannel.DURATION_ONE_HOUR.seconds

View file

@ -5,14 +5,13 @@ from apps.alerts.models import Alert
class AlertSerializer(serializers.ModelSerializer):
id = serializers.CharField(read_only=True, source="public_primary_key")
render_for_web = serializers.SerializerMethodField()
class Meta:
model = Alert
fields = [
"title",
"message",
"image_url",
"id",
"link_to_upstream_details",
"render_for_web",
"created_at",
@ -20,3 +19,14 @@ class AlertSerializer(serializers.ModelSerializer):
def get_render_for_web(self, obj):
    # Backs the render_for_web SerializerMethodField: delegate rendering to AlertWebRenderer.
    return AlertWebRenderer(obj).render()
class AlertRawSerializer(serializers.ModelSerializer):
    """Exposes an alert's public id together with its raw incoming payload (raw_request_data)."""

    # Public-facing id: the model's public_primary_key, read-only.
    id = serializers.CharField(read_only=True, source="public_primary_key")

    class Meta:
        model = Alert
        fields = [
            "id",
            "raw_request_data",
        ]

View file

@ -61,7 +61,6 @@ class AlertGroupListSerializer(EagerLoadingMixin, serializers.ModelSerializer):
"pk",
"alerts_count",
"inside_organization_number",
"verbose_name",
"alert_receive_channel",
"resolved",
"resolved_by",

View file

@ -3,7 +3,6 @@ from datetime import timedelta
import humanize
import pytz
from django.apps import apps
from django.conf import settings
from django.utils import timezone
from rest_framework import fields, serializers
@ -110,25 +109,7 @@ class CurrentOrganizationSerializer(OrganizationSerializer):
def get_limits(self, obj):
user = self.context["request"].user
if not settings.OSS_INSTALLATION:
return obj.notifications_limit_web_report(user)
# show a version warning on OSS installations in case backend and frontend are different versions
frontend_version = self.context["request"].headers.get("X-OnCall-Plugin-Version")
backend_version = settings.VERSION
version_warning = {}
if backend_version and frontend_version and backend_version != frontend_version:
text = (
"Version mismatch! Please make sure you have the same versions of the Grafana OnCall plugin "
"and Grafana OnCall engine, "
"otherwise there could be issues with your Grafana OnCall installation! "
f"Current plugin version: {frontend_version}, current engine version: {backend_version}. "
"Please see the update instructions: "
"https://grafana.com/docs/oncall/latest/open-source/#update-grafana-oncall-oss"
)
version_warning = {"period_title": "Version mismatch", "show_limits_warning": True, "warning_text": text}
return version_warning or obj.notifications_limit_web_report(user)
return obj.notifications_limit_web_report(user)
def get_env_status(self, obj):
LiveSetting.populate_settings_if_needed()
@ -147,7 +128,7 @@ class CurrentOrganizationSerializer(OrganizationSerializer):
else:
verbal_time_saved_by_amixr = None
res = {
result = {
"grouped_percent": obj.cached_grouped_percent,
"alerts_count": obj.cached_alerts_count,
"noise_reduction": obj.cached_noise_reduction,
@ -155,7 +136,7 @@ class CurrentOrganizationSerializer(OrganizationSerializer):
"verbal_time_saved_by_amixr": verbal_time_saved_by_amixr,
}
return res
return result
def update(self, instance, validated_data):
current_archive_date = instance.archive_alerts_from

View file

@ -1,38 +0,0 @@
from emoji import emojize
from rest_framework import serializers
from apps.base.models import OrganizationLogRecord
from common.api_helpers.mixins import EagerLoadingMixin
class OrganizationLogRecordSerializer(EagerLoadingMixin, serializers.ModelSerializer):
    """Read-only serializer for organization log records (author, description, labels)."""

    # Public-facing id: the model's public_primary_key, read-only.
    id = serializers.CharField(read_only=True, source="public_primary_key")
    author = serializers.SerializerMethodField()
    description = serializers.SerializerMethodField()

    class Meta:
        model = OrganizationLogRecord
        fields = [
            "id",
            "author",
            "created_at",
            "description",
            "labels",
        ]
        read_only_fields = fields.copy()

    # Relations consumed by EagerLoadingMixin to avoid N+1 queries.
    PREFETCH_RELATED = [
        "author__organization",
        # "author__slack_user_identities__slack_team_identity__amixr_team",
    ]
    SELECT_RELATED = ["author", "organization"]

    def get_author(self, obj):
        # Returns None implicitly when the record has no author.
        if obj.author:
            user_data = obj.author.short()
            return user_data

    def get_description(self, obj):
        # Render :emoji: aliases and convert newlines for HTML display.
        return emojize(obj.description, use_aliases=True).replace("\n", "<br>")

View file

@ -9,6 +9,7 @@ from apps.api.views.features import (
FEATURE_LIVE_SETTINGS,
FEATURE_SLACK,
FEATURE_TELEGRAM,
FEATURE_WEB_SCHEDULES,
)
@ -42,6 +43,7 @@ def test_select_features_all_enabled(
settings.FEATURE_LIVE_SETTINGS_ENABLED = True
settings.FEATURE_GRAFANA_CLOUD_CONNECTION = True
settings.FEATURE_GRAFANA_CLOUD_NOTIFICATIONS = True
settings.FEATURE_WEB_SCHEDULES_ENABLED = True
client = APIClient()
url = reverse("api-internal:features")
response = client.get(url, format="json", **make_user_auth_headers(user, token))
@ -53,6 +55,7 @@ def test_select_features_all_enabled(
FEATURE_GRAFANA_CLOUD_CONNECTION,
FEATURE_LIVE_SETTINGS,
FEATURE_GRAFANA_CLOUD_NOTIFICATIONS,
FEATURE_WEB_SCHEDULES,
]
@ -69,6 +72,7 @@ def test_select_features_all_disabled(
settings.FEATURE_LIVE_SETTINGS_ENABLED = False
settings.FEATURE_GRAFANA_CLOUD_CONNECTION = False
settings.FEATURE_GRAFANA_CLOUD_NOTIFICATIONS = FEATURE_GRAFANA_CLOUD_NOTIFICATIONS
settings.FEATURE_WEB_SCHEDULES_ENABLED = False
client = APIClient()
url = reverse("api-internal:features")
response = client.get(url, format="json", **make_user_auth_headers(user, token))

View file

@ -79,7 +79,14 @@ def test_create_on_call_shift_override(on_call_shift_internal_api_setup, make_us
}
response = client.post(url, data, format="json", **make_user_auth_headers(user1, token))
expected_payload = data | {"id": response.data["id"], "updated_shift": None}
returned_rolling_users = response.data["rolling_users"]
assert len(returned_rolling_users) == 1
assert sorted(returned_rolling_users[0]) == sorted(data["rolling_users"][0])
expected_payload = data | {
"id": response.data["id"],
"updated_shift": None,
"rolling_users": returned_rolling_users,
}
assert response.status_code == status.HTTP_201_CREATED
assert response.json() == expected_payload
@ -1313,6 +1320,83 @@ def test_on_call_shift_preview(
assert returned_events == expected_events
@pytest.mark.django_db
def test_on_call_shift_preview_without_users(
    make_organization_and_user_with_plugin_token,
    make_user_for_organization,
    make_user_auth_headers,
    make_schedule,
):
    """Previewing a rolling-users shift with an empty rolling_users list returns
    a rotation event flagged is_empty=True with no users, and contributes no
    non-override, non-gap entries to the final schedule."""
    organization, user, token = make_organization_and_user_with_plugin_token()
    client = APIClient()
    schedule = make_schedule(
        organization,
        schedule_class=OnCallScheduleWeb,
        name="test_web_schedule",
    )
    # Anchor the shift a week in the past, at midnight, so the preview window is deterministic.
    now = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
    start_date = now - timezone.timedelta(days=7)
    request_date = start_date
    # NOTE(review): this rebinding shadows the `user` returned by the fixture above;
    # the request below is made as this second (plain) org user. Looks intentional
    # (any org member can preview) — confirm, or rename one of the two.
    user = make_user_for_organization(organization)
    url = "{}?date={}&days={}".format(
        reverse("api-internal:oncall_shifts-preview"), request_date.strftime("%Y-%m-%d"), 1
    )
    shift_start = (start_date + timezone.timedelta(hours=12)).strftime("%Y-%m-%dT%H:%M:%SZ")
    shift_end = (start_date + timezone.timedelta(hours=13)).strftime("%Y-%m-%dT%H:%M:%SZ")
    shift_data = {
        "schedule": schedule.public_primary_key,
        "type": CustomOnCallShift.TYPE_ROLLING_USERS_EVENT,
        "rotation_start": shift_start,
        "shift_start": shift_start,
        "shift_end": shift_end,
        # passing empty users
        "rolling_users": [],
        "priority_level": 2,
        "frequency": CustomOnCallShift.FREQUENCY_DAILY,
    }
    response = client.post(url, shift_data, format="json", **make_user_auth_headers(user, token))
    assert response.status_code == status.HTTP_200_OK
    # check rotation events: a single empty-rotation slot with no users and no priority
    rotation_events = response.json()["rotation"]
    expected_rotation_events = [
        {
            "calendar_type": OnCallSchedule.TYPE_ICAL_PRIMARY,
            "start": shift_start,
            "end": shift_end,
            "all_day": False,
            "is_override": False,
            "is_empty": True,
            "is_gap": False,
            "priority_level": None,
            "missing_users": [],
            "users": [],
            "source": "web",
        }
    ]
    # there isn't a saved shift, we don't care/know the temp pk
    _ = [r.pop("shift") for r in rotation_events]
    assert rotation_events == expected_rotation_events
    # check final schedule events: an empty rotation adds nothing beyond overrides/gaps
    final_events = response.json()["final"]
    expected_events = []
    returned_events = [
        {
            "end": e["end"],
            "start": e["start"],
            "user": e["users"][0]["display_name"] if e["users"] else None,
            "is_empty": e["is_empty"],
        }
        for e in final_events
        if not e["is_override"] and not e["is_gap"]
    ]
    assert returned_events == expected_events
@pytest.mark.django_db
def test_on_call_shift_preview_merge_events(
make_organization_and_user_with_plugin_token,

View file

@ -1,242 +0,0 @@
from unittest.mock import patch
import pytest
from django.urls import reverse
from rest_framework import status
from rest_framework.response import Response
from rest_framework.test import APIClient
from apps.base.models import OrganizationLogRecord
from apps.user_management.organization_log_creator import OrganizationLogType
from common.constants.role import Role
@pytest.mark.django_db
@pytest.mark.parametrize(
    "role,expected_status",
    [
        (Role.ADMIN, status.HTTP_200_OK),
        (Role.EDITOR, status.HTTP_200_OK),
        (Role.VIEWER, status.HTTP_200_OK),
    ],
)
def test_organization_log_records_permissions(
    make_organization_and_user_with_plugin_token, make_user_auth_headers, role, expected_status
):
    """Every role is allowed to hit the organization-log list endpoint."""
    _, user, token = make_organization_and_user_with_plugin_token(role)
    auth_headers = make_user_auth_headers(user, token)
    list_url = reverse("api-internal:organization_log-list")
    # Mock out the actual list handler — only the permission layer is under test.
    mocked_list = patch(
        "apps.api.views.organization_log_record.OrganizationLogRecordView.list",
        return_value=Response(status=status.HTTP_200_OK),
    )
    with mocked_list:
        response = APIClient().get(list_url, format="json", **auth_headers)
    assert response.status_code == expected_status
@pytest.mark.django_db
@pytest.mark.parametrize(
    "role,expected_status",
    [
        (Role.ADMIN, status.HTTP_200_OK),
        (Role.EDITOR, status.HTTP_200_OK),
        (Role.VIEWER, status.HTTP_200_OK),
    ],
)
def test_organization_log_records_filters_permissions(
    make_organization_and_user_with_plugin_token, make_user_auth_headers, role, expected_status
):
    """Every role is allowed to hit the organization-log filters endpoint."""
    _, user, token = make_organization_and_user_with_plugin_token(role)
    auth_headers = make_user_auth_headers(user, token)
    filters_url = reverse("api-internal:organization_log-filters")
    # Mock out the filters handler — only the permission layer is under test.
    mocked_filters = patch(
        "apps.api.views.organization_log_record.OrganizationLogRecordView.filters",
        return_value=Response(status=status.HTTP_200_OK),
    )
    with mocked_filters:
        response = APIClient().get(filters_url, format="json", **auth_headers)
    assert response.status_code == expected_status
@pytest.mark.django_db
@pytest.mark.parametrize(
    "role,expected_status",
    [
        (Role.ADMIN, status.HTTP_200_OK),
        (Role.EDITOR, status.HTTP_200_OK),
        (Role.VIEWER, status.HTTP_200_OK),
    ],
)
def test_organization_log_records_label_options_permissions(
    make_organization_and_user_with_plugin_token, make_user_auth_headers, role, expected_status
):
    """Every role is allowed to hit the organization-log label-options endpoint."""
    _, user, token = make_organization_and_user_with_plugin_token(role)
    auth_headers = make_user_auth_headers(user, token)
    options_url = reverse("api-internal:organization_log-label-options")
    # Mock out the label_options handler — only the permission layer is under test.
    mocked_label_options = patch(
        "apps.api.views.organization_log_record.OrganizationLogRecordView.label_options",
        return_value=Response(status=status.HTTP_200_OK),
    )
    with mocked_label_options:
        response = APIClient().get(options_url, format="json", **auth_headers)
    assert response.status_code == expected_status
@pytest.mark.django_db
def test_get_filter_created_at(
    make_organization_and_user_with_plugin_token,
    make_organization_log_record,
    make_user_auth_headers,
):
    """A created_at range spanning the record's timestamp returns that record."""
    organization, user, token = make_organization_and_user_with_plugin_token()
    client = APIClient()
    make_organization_log_record(organization, user)
    url = reverse("api-internal:organization_log-list")
    # f-string query building, consistent with the other created_at tests in this module
    # (previously this one alone used `url + "?..."` concatenation).
    response = client.get(
        f"{url}?created_at=1970-01-01T00:00:00/2099-01-01T23:59:59",
        format="json",
        **make_user_auth_headers(user, token),
    )
    assert response.status_code == status.HTTP_200_OK
    assert len(response.data["results"]) == 1
@pytest.mark.django_db
def test_get_filter_created_at_empty_result(
    make_organization_and_user_with_plugin_token,
    make_organization_log_record,
    make_user_auth_headers,
):
    """A created_at range entirely in the past matches no log records."""
    organization, user, token = make_organization_and_user_with_plugin_token()
    make_organization_log_record(organization, user)
    auth_headers = make_user_auth_headers(user, token)
    list_url = reverse("api-internal:organization_log-list")
    # The record was created "now", so a 1970-only window must exclude it.
    response = APIClient().get(
        f"{list_url}?created_at=1970-01-01T00:00:00/1970-01-01T23:59:59",
        format="json",
        **auth_headers,
    )
    assert response.status_code == status.HTTP_200_OK
    assert len(response.data["results"]) == 0
@pytest.mark.django_db
def test_get_filter_created_at_invalid_format(
    make_organization_and_user_with_plugin_token,
    make_user_auth_headers,
):
    """A malformed created_at value is rejected with HTTP 400."""
    organization, user, token = make_organization_and_user_with_plugin_token()
    auth_headers = make_user_auth_headers(user, token)
    list_url = reverse("api-internal:organization_log-list")
    response = APIClient().get(f"{list_url}?created_at=invalid_date_format", format="json", **auth_headers)
    assert response.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_get_filter_by_labels(
    make_organization_and_user_with_plugin_token,
    make_organization_log_record,
    make_user_auth_headers,
):
    """Filtering by labels is conjunctive: one label widens, two labels narrow."""
    organization, user, token = make_organization_and_user_with_plugin_token()
    client = APIClient()
    auth_headers = make_user_auth_headers(user, token)
    # create log that contains LABEL_SLACK and LABEL_DEFAULT_CHANNEL
    make_organization_log_record(organization, user, type=OrganizationLogType.TYPE_SLACK_DEFAULT_CHANNEL_CHANGED)
    # create log that contains LABEL_SLACK but does not contain LABEL_DEFAULT_CHANNEL
    make_organization_log_record(organization, user, type=OrganizationLogType.TYPE_SLACK_WORKSPACE_DISCONNECTED)
    # create log that does not contain labels from search
    make_organization_log_record(organization, user, type=OrganizationLogType.TYPE_INTEGRATION_CREATED)
    list_url = reverse("api-internal:organization_log-list")

    # search by one label: LABEL_SLACK — both Slack-related records match
    response = client.get(
        f"{list_url}?labels={OrganizationLogRecord.LABEL_SLACK}", format="json", **auth_headers
    )
    assert response.status_code == status.HTTP_200_OK
    assert len(response.data["results"]) == 2
    for labels in (log["labels"] for log in response.data["results"]):
        assert OrganizationLogRecord.LABEL_SLACK in labels

    # search by two labels: LABEL_SLACK and LABEL_DEFAULT_CHANNEL — only one record has both
    response = client.get(
        f"{list_url}?labels={OrganizationLogRecord.LABEL_SLACK}&labels={OrganizationLogRecord.LABEL_DEFAULT_CHANNEL}",
        format="json",
        **auth_headers,
    )
    assert response.status_code == status.HTTP_200_OK
    assert len(response.data["results"]) == 1
    for labels in (log["labels"] for log in response.data["results"]):
        assert OrganizationLogRecord.LABEL_SLACK in labels
        assert OrganizationLogRecord.LABEL_DEFAULT_CHANNEL in labels
@pytest.mark.django_db
def test_get_filter_author(
    make_organization_and_user_with_plugin_token,
    make_user_for_organization,
    make_organization_log_record,
    make_user_auth_headers,
):
    """Filtering by author returns only records created by that user."""
    client = APIClient()
    organization, first_user, token = make_organization_and_user_with_plugin_token()
    second_user = make_user_for_organization(organization)
    make_organization_log_record(organization, first_user)
    auth_headers = make_user_auth_headers(first_user, token)
    list_url = reverse("api-internal:organization_log-list")

    # first_user authored the only record
    first_response = client.get(
        f"{list_url}?author={first_user.public_primary_key}", format="json", **auth_headers
    )
    assert first_response.status_code == status.HTTP_200_OK
    assert len(first_response.data["results"]) == 1

    # second_user authored nothing
    second_response = client.get(
        f"{list_url}?author={second_user.public_primary_key}", format="json", **auth_headers
    )
    assert second_response.status_code == status.HTTP_200_OK
    assert len(second_response.data["results"]) == 0
@pytest.mark.django_db
def test_get_filter_author_multiple_values(
    make_organization_and_user_with_plugin_token,
    make_user_for_organization,
    make_organization_log_record,
    make_user_auth_headers,
):
    """Repeated author params are OR-ed: records by any listed author match."""
    client = APIClient()
    organization, first_user, token = make_organization_and_user_with_plugin_token()
    second_user = make_user_for_organization(organization)
    third_user = make_user_for_organization(organization)
    make_organization_log_record(organization, first_user)
    make_organization_log_record(organization, second_user)
    auth_headers = make_user_auth_headers(first_user, token)
    list_url = reverse("api-internal:organization_log-list")

    # first_user and second_user each authored one record
    first_response = client.get(
        f"{list_url}?author={first_user.public_primary_key}&author={second_user.public_primary_key}",
        format="json",
        **auth_headers,
    )
    assert first_response.status_code == status.HTTP_200_OK
    assert len(first_response.data["results"]) == 2

    # third_user authored nothing, so only first_user's record matches
    second_response = client.get(
        f"{list_url}?author={first_user.public_primary_key}&author={third_user.public_primary_key}",
        format="json",
        **auth_headers,
    )
    assert second_response.status_code == status.HTTP_200_OK
    assert len(second_response.data["results"]) == 1

View file

@ -912,7 +912,7 @@ def test_merging_same_shift_events(
"is_gap": False,
"priority_level": 1,
"start": start_date + timezone.timedelta(hours=10),
"users": [user_a.username, user_b.username],
"users": sorted([user_a.username, user_b.username]),
"missing_users": [user_c.username],
}
]
@ -929,7 +929,7 @@ def test_merging_same_shift_events(
"is_gap": e["is_gap"],
"priority_level": e["priority_level"],
"start": e["start"],
"users": [u["display_name"] for u in e["users"]] if e["users"] else None,
"users": sorted([u["display_name"] for u in e["users"]]) if e["users"] else None,
"missing_users": e["missing_users"],
}
for e in response.data["events"]
@ -950,7 +950,7 @@ def test_merging_same_shift_events(
"is_gap": e["is_gap"],
"priority_level": e["priority_level"],
"start": e["start"],
"users": [u["display_name"] for u in e["users"]] if e["users"] else None,
"users": sorted([u["display_name"] for u in e["users"]]) if e["users"] else None,
"missing_users": e["missing_users"],
}
for e in response.data["events"]

View file

@ -85,3 +85,198 @@ def test_list_teams_permissions(
response = client.get(url, format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_team_permissions_wrong_team_general(
    make_organization,
    make_team,
    make_alert_group,
    make_alert_receive_channel,
    make_user,
    make_escalation_chain,
    make_schedule,
    make_custom_action,
    make_token_for_organization,
    make_user_auth_headers,
):
    """A user whose current team is a real team gets 403 "wrong_team" when
    accessing a resource that belongs to the General (no-team) bucket; the
    payload identifies General as the owner team."""
    organization = make_organization()
    user = make_user(organization=organization)
    _, token = make_token_for_organization(organization)
    team = make_team(organization)
    user.teams.add(team)
    # Point the user's current team at the real team, away from General.
    user.current_team = team
    user.save(update_fields=["current_team"])
    # Resource created without a team => owned by General.
    alert_receive_channel = make_alert_receive_channel(organization)
    alert_group = make_alert_group(alert_receive_channel)
    # escalation_chain = make_escalation_chain(organization)
    # schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar)
    # webhook = make_custom_action(organization)
    for endpoint, instance in (
        ("alertgroup", alert_group),
        # todo: implement team filtering for other resources
        # ("alert_receive_channel", alert_receive_channel),
        # ("escalation_chain", escalation_chain),
        # ("schedule", schedule),
        # ("custom_button", webhook),
    ):
        client = APIClient()
        url = reverse(f"api-internal:{endpoint}-detail", kwargs={"pk": instance.public_primary_key})
        response = client.get(url, **make_user_auth_headers(user, token))
        assert response.status_code == status.HTTP_403_FORBIDDEN
        # General is represented with a null id and no contact/avatar details.
        assert response.json() == {
            "error_code": "wrong_team",
            "owner_team": {"name": "General", "id": None, "email": None, "avatar_url": None},
        }
@pytest.mark.django_db
def test_team_permissions_wrong_team(
    make_organization,
    make_team,
    make_alert_group,
    make_alert_receive_channel,
    make_user,
    make_escalation_chain,
    make_schedule,
    make_custom_action,
    make_token_for_organization,
    make_user_auth_headers,
):
    """A team member whose current team is General (no current_team set) gets
    403 "wrong_team" for a resource owned by their team; the payload names
    that team as the owner so the client can offer a team switch."""
    organization = make_organization()
    user = make_user(organization=organization)
    _, token = make_token_for_organization(organization)
    team = make_team(organization)
    # User belongs to the team but current_team is left unset (General).
    user.teams.add(team)
    alert_receive_channel = make_alert_receive_channel(organization, team=team)
    alert_group = make_alert_group(alert_receive_channel)
    # escalation_chain = make_escalation_chain(organization, team=team)
    # schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar, team=team)
    # webhook = make_custom_action(organization, team=team)
    for endpoint, instance in (
        ("alertgroup", alert_group),
        # todo: implement team filtering for other resources
        # ("alert_receive_channel", alert_receive_channel),
        # ("escalation_chain", escalation_chain),
        # ("schedule", schedule),
        # ("custom_button", webhook),
    ):
        client = APIClient()
        url = reverse(f"api-internal:{endpoint}-detail", kwargs={"pk": instance.public_primary_key})
        response = client.get(url, **make_user_auth_headers(user, token))
        assert response.status_code == status.HTTP_403_FORBIDDEN
        # The owner team's full identity is disclosed because the user is a member.
        assert response.json() == {
            "error_code": "wrong_team",
            "owner_team": {
                "name": team.name,
                "id": team.public_primary_key,
                "email": team.email,
                "avatar_url": team.avatar_url,
            },
        }
@pytest.mark.django_db
def test_team_permissions_not_in_team(
    make_organization,
    make_team,
    make_alert_group,
    make_alert_receive_channel,
    make_user,
    make_escalation_chain,
    make_schedule,
    make_custom_action,
    make_token_for_organization,
    make_user_auth_headers,
):
    """A user who is NOT a member of the owning team gets 403 "wrong_team"
    WITHOUT any owner_team details — team identity must not leak to outsiders."""
    organization = make_organization()
    user = make_user(organization=organization)
    _, token = make_token_for_organization(organization)
    # Note: the user is deliberately never added to this team.
    team = make_team(organization)
    alert_receive_channel = make_alert_receive_channel(organization, team=team)
    alert_group = make_alert_group(alert_receive_channel)
    # escalation_chain = make_escalation_chain(organization, team=team)
    # schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar, team=team)
    # webhook = make_custom_action(organization, team=team)
    for endpoint, instance in (
        ("alertgroup", alert_group),
        # todo: implement team filtering for other resources
        # ("alert_receive_channel", alert_receive_channel),
        # ("escalation_chain", escalation_chain),
        # ("schedule", schedule),
        # ("custom_button", webhook),
    ):
        client = APIClient()
        url = reverse(f"api-internal:{endpoint}-detail", kwargs={"pk": instance.public_primary_key})
        response = client.get(url, **make_user_auth_headers(user, token))
        assert response.status_code == status.HTTP_403_FORBIDDEN
        # No owner_team key: non-members must not learn which team owns the resource.
        assert response.json() == {"error_code": "wrong_team"}
@pytest.mark.django_db
def test_team_permissions_right_team(
    make_organization,
    make_team,
    make_alert_group,
    make_alert_receive_channel,
    make_user,
    make_escalation_chain,
    make_schedule,
    make_custom_action,
    make_token_for_organization,
    make_user_auth_headers,
):
    """A team member whose current team matches the resource's owning team can
    retrieve the resource (HTTP 200) — the happy path of team filtering."""
    organization = make_organization()
    user = make_user(organization=organization)
    _, token = make_token_for_organization(organization)
    team = make_team(organization)
    user.teams.add(team)
    # Align the user's current team with the resource's team.
    user.current_team = team
    user.save(update_fields=["current_team"])
    alert_receive_channel = make_alert_receive_channel(organization, team=team)
    alert_group = make_alert_group(alert_receive_channel)
    # escalation_chain = make_escalation_chain(organization, team=team)
    # schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar, team=team)
    # webhook = make_custom_action(organization, team=team)
    for endpoint, instance in (
        ("alertgroup", alert_group),
        # todo: implement team filtering for other resources
        # ("alert_receive_channel", alert_receive_channel),
        # ("escalation_chain", escalation_chain),
        # ("schedule", schedule),
        # ("custom_button", webhook),
    ):
        client = APIClient()
        url = reverse(f"api-internal:{endpoint}-detail", kwargs={"pk": instance.public_primary_key})
        response = client.get(url, **make_user_auth_headers(user, token))
        assert response.status_code == status.HTTP_200_OK

View file

@ -800,6 +800,30 @@ def test_admin_can_unlink_another_user_backend_account(
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_admin_can_unlink_another_user_slack_account(
    make_organization_with_slack_team_identity,
    make_user_for_organization,
    make_user_with_slack_user_identity,
    make_token_for_organization,
    make_user_auth_headers,
):
    """An admin may disconnect another user's Slack identity."""
    organization, slack_team_identity = make_organization_with_slack_team_identity()
    admin = make_user_for_organization(organization, role=Role.ADMIN)
    editor, _unused_slack_identity = make_user_with_slack_user_identity(
        slack_team_identity, organization, slack_id="user_1", role=Role.EDITOR
    )
    _, token = make_token_for_organization(organization)

    unlink_url = reverse("api-internal:user-unlink-slack", kwargs={"pk": editor.public_primary_key})
    response = APIClient().post(unlink_url, format="json", **make_user_auth_headers(admin, token))

    assert response.status_code == status.HTTP_200_OK
    # The editor's Slack link must be gone after the admin's request.
    editor.refresh_from_db()
    assert editor.slack_user_identity is None
"""Test user permissions"""
@ -1038,6 +1062,28 @@ def test_user_cant_get_another_user_backend_verification_code(
assert response.status_code == status.HTTP_403_FORBIDDEN
@pytest.mark.django_db
def test_user_can_unlink_own_slack_account(
    make_organization_with_slack_team_identity,
    make_user_with_slack_user_identity,
    make_token_for_organization,
    make_user_auth_headers,
):
    """A user may disconnect their own Slack identity."""
    organization, slack_team_identity = make_organization_with_slack_team_identity()
    user, _unused_slack_identity = make_user_with_slack_user_identity(
        slack_team_identity, organization, slack_id="user_1", role=Role.EDITOR
    )
    _, token = make_token_for_organization(organization)

    unlink_url = reverse("api-internal:user-unlink-slack", kwargs={"pk": user.public_primary_key})
    response = APIClient().post(unlink_url, format="json", **make_user_auth_headers(user, token))

    assert response.status_code == status.HTTP_200_OK
    # Self-unlink succeeds: the Slack identity is cleared.
    user.refresh_from_db()
    assert user.slack_user_identity is None
@pytest.mark.django_db
def test_user_can_unlink_backend_own_account(
make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
@ -1086,6 +1132,31 @@ def test_user_unlink_backend_backend_account_not_found(
assert response.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_user_cant_unlink_slack_another_user(
    make_organization_with_slack_team_identity,
    make_user_with_slack_user_identity,
    make_token_for_organization,
    make_user_auth_headers,
):
    """A non-admin user may not disconnect another user's Slack identity."""
    organization, slack_team_identity = make_organization_with_slack_team_identity()
    first_user, _identity_1 = make_user_with_slack_user_identity(
        slack_team_identity, organization, slack_id="user_1", role=Role.EDITOR
    )
    second_user, _identity_2 = make_user_with_slack_user_identity(
        slack_team_identity, organization, slack_id="user_2", role=Role.EDITOR
    )
    _, token = make_token_for_organization(organization)

    # second_user (an editor) targets first_user's Slack link.
    unlink_url = reverse("api-internal:user-unlink-slack", kwargs={"pk": first_user.public_primary_key})
    response = APIClient().post(unlink_url, format="json", **make_user_auth_headers(second_user, token))

    assert response.status_code == status.HTTP_403_FORBIDDEN
    # The target's Slack link must survive the forbidden request.
    first_user.refresh_from_db()
    assert first_user.slack_user_identity is not None
@pytest.mark.django_db
def test_user_cant_unlink_backend__another_user(
make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers

View file

@ -1,5 +1,5 @@
from django.conf import settings
from django.urls import include, path
from django.urls import include, path, re_path
from common.api_helpers.optional_slash_router import OptionalSlashRouter, optional_slash_path
@ -7,6 +7,7 @@ from .views import UserNotificationPolicyView, auth
from .views.alert_group import AlertGroupView
from .views.alert_receive_channel import AlertReceiveChannelView
from .views.alert_receive_channel_template import AlertReceiveChannelTemplateView
from .views.alerts import AlertDetailView
from .views.apns_device import APNSDeviceAuthorizedViewSet
from .views.channel_filter import ChannelFilterView
from .views.custom_button import CustomButtonView
@ -24,7 +25,6 @@ from .views.organization import (
GetTelegramVerificationCode,
SetGeneralChannel,
)
from .views.organization_log_record import OrganizationLogRecordView
from .views.preview_template_options import PreviewTemplateOptionsView
from .views.public_api_tokens import PublicApiTokenView
from .views.resolution_note import ResolutionNoteView
@ -64,7 +64,6 @@ router.register(r"telegram_channels", TelegramChannelViewSet, basename="telegram
router.register(r"slack_channels", SlackChannelView, basename="slack_channel")
router.register(r"user_groups", UserGroupViewSet, basename="user_group")
router.register(r"heartbeats", IntegrationHeartBeatView, basename="integration_heartbeat")
router.register(r"organization_logs", OrganizationLogRecordView, basename="organization_log")
router.register(r"tokens", PublicApiTokenView, basename="api_token")
router.register(r"live_settings", LiveSettingViewSet, basename="live_settings")
router.register(r"oncall_shifts", OnCallShiftView, basename="oncall_shifts")
@ -110,6 +109,7 @@ urlpatterns = [
),
optional_slash_path("route_regex_debugger", RouteRegexDebuggerView.as_view(), name="route_regex_debugger"),
optional_slash_path("insight_logs_test", TestInsightLogsAPIView.as_view(), name="insight-logs-test"),
re_path(r"^alerts/(?P<id>\w+)/?$", AlertDetailView.as_view(), name="alerts-detail"),
]
urlpatterns += [

View file

@ -18,7 +18,7 @@ from apps.auth_token.auth import MobileAppAuthTokenAuthentication, PluginAuthent
from apps.user_management.models import User
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.filters import DateRangeFilterMixin, ModelFieldFilterMixin
from common.api_helpers.mixins import PreviewTemplateMixin, PublicPrimaryKeyMixin
from common.api_helpers.mixins import PreviewTemplateMixin, PublicPrimaryKeyMixin, TeamFilteringMixin
from common.api_helpers.paginators import TwentyFiveCursorPaginator
@ -143,8 +143,13 @@ class AlertGroupFilter(DateRangeFilterMixin, ModelFieldFilterMixin, filters.Filt
return queryset
class AlertGroupTeamFilteringMixin(TeamFilteringMixin):
TEAM_LOOKUP = "channel__team"
class AlertGroupView(
PreviewTemplateMixin,
AlertGroupTeamFilteringMixin,
PublicPrimaryKeyMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
@ -186,8 +191,7 @@ class AlertGroupView(
pagination_class = TwentyFiveCursorPaginator
filter_backends = [SearchFilter, filters.DjangoFilterBackend]
# todo: add ability to search by templated title
search_fields = ["public_primary_key", "inside_organization_number"]
search_fields = ["public_primary_key", "inside_organization_number", "web_title_cache"]
filterset_class = AlertGroupFilter

View file

@ -17,7 +17,6 @@ from apps.api.serializers.alert_receive_channel import (
)
from apps.api.throttlers import DemoAlertThrottler
from apps.auth_token.auth import PluginAuthentication
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import (
FilterSerializerMixin,
@ -26,6 +25,7 @@ from common.api_helpers.mixins import (
UpdateSerializerMixin,
)
from common.exceptions import TeamCanNotBeChangedError, UnableToSendDemoAlert
from common.insight_log import EntityEvent, write_resource_insight_log
class AlertReceiveChannelFilter(filters.FilterSet):
@ -96,21 +96,22 @@ class AlertReceiveChannelView(
return Response(data="invalid integration", status=status.HTTP_400_BAD_REQUEST)
def perform_update(self, serializer):
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
description = f"Integration settings was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
serializer.instance.organization,
self.request.user,
OrganizationLogType.TYPE_INTEGRATION_CHANGED,
description,
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_destroy(self, instance):
description = f"Integration {instance.verbal_name} was deleted"
create_organization_log(
instance.organization, self.request.user, OrganizationLogType.TYPE_INTEGRATION_DELETED, description
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
instance.delete()

View file

@ -5,8 +5,8 @@ from apps.alerts.models import AlertReceiveChannel
from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin
from apps.api.serializers.alert_receive_channel import AlertReceiveChannelTemplatesSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.mixins import PublicPrimaryKeyMixin
from common.insight_log import EntityEvent, write_resource_insight_log
class AlertReceiveChannelTemplateView(
@ -35,18 +35,15 @@ class AlertReceiveChannelTemplateView(
def update(self, request, *args, **kwargs):
instance = self.get_object()
old_state = instance.repr_settings_for_client_side_logging
prev_state = instance.insight_logs_serialized
result = super().update(request, *args, **kwargs)
instance = self.get_object()
new_state = instance.repr_settings_for_client_side_logging
if new_state != old_state:
description = f"Integration settings was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
instance.organization,
self.request.user,
OrganizationLogType.TYPE_INTEGRATION_CHANGED,
description,
)
new_state = instance.insight_logs_serialized
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
return result

View file

@ -0,0 +1,23 @@
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.alerts.models import Alert
from apps.api.serializers.alert import AlertRawSerializer
from apps.auth_token.auth import PluginAuthentication
class AlertDetailView(APIView):
    """Return the raw payload of a single alert, looked up by public primary key."""

    authentication_classes = [PluginAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, id):
        not_found = Response(status=status.HTTP_404_NOT_FOUND)
        try:
            alert = Alert.objects.get(public_primary_key=id)
        except Alert.DoesNotExist:
            return not_found
        # Cross-organization access is answered with 404 (not 403) so the
        # existence of the alert is not disclosed to other organizations.
        if alert.group.channel.organization != request.auth.organization:
            return not_found
        return Response(AlertRawSerializer(alert).data)

View file

@ -15,10 +15,10 @@ from apps.api.serializers.channel_filter import (
from apps.api.throttlers import DemoAlertThrottler
from apps.auth_token.auth import PluginAuthentication
from apps.slack.models import SlackChannel
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import CreateSerializerMixin, PublicPrimaryKeyMixin, UpdateSerializerMixin
from common.exceptions import UnableToSendDemoAlert
from common.insight_log import EntityEvent, write_resource_insight_log
class ChannelFilterView(PublicPrimaryKeyMixin, CreateSerializerMixin, UpdateSerializerMixin, ModelViewSet):
@ -59,70 +59,59 @@ class ChannelFilterView(PublicPrimaryKeyMixin, CreateSerializerMixin, UpdateSeri
return queryset
def destroy(self, request, *args, **kwargs):
user = request.user
instance = self.get_object()
if instance.is_default:
raise BadRequest(detail="Unable to delete default filter")
else:
alert_receive_channel = instance.alert_receive_channel
route_verbal = instance.verbal_name_for_clients.capitalize()
description = f"{route_verbal} for integration {alert_receive_channel.verbal_name} was deleted"
create_organization_log(
user.organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_DELETED, description
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_create(self, serializer):
user = self.request.user
serializer.save()
instance = serializer.instance
alert_receive_channel = instance.alert_receive_channel
route_verbal = instance.verbal_name_for_clients.capitalize()
description = f"{route_verbal} was created for integration {alert_receive_channel.verbal_name}"
create_organization_log(user.organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_CREATED, description)
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.CREATED,
)
def perform_update(self, serializer):
user = self.request.user
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
alert_receive_channel = serializer.instance.alert_receive_channel
route_verbal = serializer.instance.verbal_name_for_clients
description = (
f"Settings for {route_verbal} of integration {alert_receive_channel.verbal_name} "
f"was changed from:\n{old_state}\nto:\n{new_state}"
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
create_organization_log(user.organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_CHANGED, description)
@action(detail=True, methods=["put"])
def move_to_position(self, request, pk):
position = request.query_params.get("position", None)
if position is not None:
try:
source_filter = ChannelFilter.objects.get(public_primary_key=pk)
instance = ChannelFilter.objects.get(public_primary_key=pk)
except ChannelFilter.DoesNotExist:
raise BadRequest(detail="Channel filter does not exist")
try:
if source_filter.is_default:
if instance.is_default:
raise BadRequest(detail="Unable to change position for default filter")
user = self.request.user
old_state = source_filter.repr_settings_for_client_side_logging
prev_state = instance.insight_logs_serialized
instance.to(int(position))
new_state = instance.insight_logs_serialized
source_filter.to(int(position))
new_state = source_filter.repr_settings_for_client_side_logging
alert_receive_channel = source_filter.alert_receive_channel
route_verbal = source_filter.verbal_name_for_clients
description = (
f"Settings for {route_verbal} of integration {alert_receive_channel.verbal_name} "
f"was changed from:\n{old_state}\nto:\n{new_state}"
)
create_organization_log(
user.organization,
user,
OrganizationLogType.TYPE_CHANNEL_FILTER_CHANGED,
description,
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
return Response(status=status.HTTP_200_OK)
except ValueError as e:

View file

@ -11,9 +11,9 @@ from apps.alerts.tasks.custom_button_result import custom_button_result
from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin, IsAdminOrEditor
from apps.api.serializers.custom_button import CustomButtonSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import PublicPrimaryKeyMixin
from common.insight_log import EntityEvent, write_resource_insight_log
class CustomButtonView(PublicPrimaryKeyMixin, ModelViewSet):
@ -55,26 +55,30 @@ class CustomButtonView(PublicPrimaryKeyMixin, ModelViewSet):
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
organization = self.request.auth.organization
user = self.request.user
description = f"Custom action {instance.name} was created"
create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_CREATED, description)
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.CREATED,
)
def perform_update(self, serializer):
organization = self.request.auth.organization
user = self.request.user
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
description = f"Custom action {serializer.instance.name} was changed " f"from:\n{old_state}\nto:\n{new_state}"
create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_CHANGED, description)
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_destroy(self, instance):
organization = self.request.auth.organization
user = self.request.user
description = f"Custom action {instance.name} was deleted"
create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_DELETED, description)
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
instance.delete()
@action(detail=True, methods=["post"])

View file

@ -10,9 +10,9 @@ from apps.alerts.models import EscalationChain
from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin
from apps.api.serializers.escalation_chain import EscalationChainListSerializer, EscalationChainSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import ListSerializerMixin, PublicPrimaryKeyMixin
from common.insight_log import EntityEvent, write_resource_insight_log
class EscalationChainViewSet(PublicPrimaryKeyMixin, ListSerializerMixin, viewsets.ModelViewSet):
@ -56,45 +56,31 @@ class EscalationChainViewSet(PublicPrimaryKeyMixin, ListSerializerMixin, viewset
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
description = f"Escalation chain {instance.name} was created"
create_organization_log(
instance.organization,
self.request.user,
OrganizationLogType.TYPE_ESCALATION_CHAIN_CREATED,
description,
)
write_resource_insight_log(instance=serializer.instance, author=self.request.user, event=EntityEvent.CREATED)
def perform_destroy(self, instance):
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
instance.delete()
description = f"Escalation chain {instance.name} was deleted"
create_organization_log(
instance.organization,
self.request.user,
OrganizationLogType.TYPE_ESCALATION_CHAIN_DELETED,
description,
)
def perform_update(self, serializer):
instance = serializer.instance
old_state = instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.insight_logs_serialized
new_state = instance.repr_settings_for_client_side_logging
description = f"Escalation chain {instance.name} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
instance.organization,
self.request.user,
OrganizationLogType.TYPE_ESCALATION_CHAIN_CHANGED,
description,
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
@action(methods=["post"], detail=True)
def copy(self, request, pk):
user = request.user
name = request.data.get("name")
if name is None:
raise BadRequest(detail={"name": ["This field may not be null."]})
@ -105,8 +91,11 @@ class EscalationChainViewSet(PublicPrimaryKeyMixin, ListSerializerMixin, viewset
obj = self.get_object()
copy = obj.make_copy(name)
serializer = self.get_serializer(copy)
description = f"Escalation chain {obj.name} was copied with new name {name}"
create_organization_log(copy.organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_CHANGED, description)
write_resource_insight_log(
instance=copy,
author=self.request.user,
event=EntityEvent.CREATED,
)
return Response(serializer.data)
@action(methods=["get"], detail=True)

View file

@ -14,9 +14,9 @@ from apps.api.serializers.escalation_policy import (
EscalationPolicyUpdateSerializer,
)
from apps.auth_token.auth import PluginAuthentication
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import CreateSerializerMixin, PublicPrimaryKeyMixin, UpdateSerializerMixin
from common.insight_log import EntityEvent, write_resource_insight_log
class EscalationPolicyView(PublicPrimaryKeyMixin, CreateSerializerMixin, UpdateSerializerMixin, ModelViewSet):
@ -66,37 +66,31 @@ class EscalationPolicyView(PublicPrimaryKeyMixin, CreateSerializerMixin, UpdateS
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
organization = self.request.user.organization
user = self.request.user
description = (
f"Escalation step '{instance.step_type_verbal}' with order {instance.order} "
f"was created for escalation chain '{instance.escalation_chain.name}'"
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.CREATED,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_CREATED, description)
def perform_update(self, serializer):
organization = self.request.user.organization
user = self.request.user
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
escalation_chain_name = serializer.instance.escalation_chain.name
new_state = serializer.instance.insight_logs_serialized
description = (
f"Settings for escalation step of escalation chain '{escalation_chain_name}' "
f"was changed from:\n{old_state}\nto:\n{new_state}"
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_CHANGED, description)
def perform_destroy(self, instance):
organization = self.request.user.organization
user = self.request.user
description = (
f"Escalation step '{instance.step_type_verbal}' with order {instance.order} of "
f"of escalation chain '{instance.escalation_chain.name}' was deleted"
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_DELETED, description)
instance.delete()
@action(detail=True, methods=["put"])
@ -104,29 +98,22 @@ class EscalationPolicyView(PublicPrimaryKeyMixin, CreateSerializerMixin, UpdateS
position = request.query_params.get("position", None)
if position is not None:
try:
source_step = EscalationPolicy.objects.get(public_primary_key=pk)
instance = EscalationPolicy.objects.get(public_primary_key=pk)
except EscalationPolicy.DoesNotExist:
raise BadRequest(detail="Step does not exist")
try:
user = self.request.user
old_state = source_step.repr_settings_for_client_side_logging
prev_state = instance.insight_logs_serialized
position = int(position)
source_step.to(position)
instance.to(position)
new_state = instance.insight_logs_serialized
new_state = source_step.repr_settings_for_client_side_logging
escalation_chain_name = source_step.escalation_chain.name
description = (
f"Settings for escalation step of escalation chain '{escalation_chain_name}' "
f"was changed from:\n{old_state}\nto:\n{new_state}"
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
create_organization_log(
user.organization,
user,
OrganizationLogType.TYPE_ESCALATION_STEP_CHANGED,
description,
)
return Response(status=status.HTTP_200_OK)
except ValueError as e:
raise BadRequest(detail=f"{e}")

View file

@ -12,6 +12,7 @@ FEATURE_LIVE_SETTINGS = "live_settings"
MOBILE_APP_PUSH_NOTIFICATIONS = "mobile_app"
FEATURE_GRAFANA_CLOUD_NOTIFICATIONS = "grafana_cloud_notifications"
FEATURE_GRAFANA_CLOUD_CONNECTION = "grafana_cloud_connection"
FEATURE_WEB_SCHEDULES = "web_schedules"
class FeaturesAPIView(APIView):
@ -56,4 +57,7 @@ class FeaturesAPIView(APIView):
if live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED:
enabled_features.append(FEATURE_GRAFANA_CLOUD_NOTIFICATIONS)
if settings.FEATURE_WEB_SCHEDULES_ENABLED:
enabled_features.append(FEATURE_WEB_SCHEDULES)
return enabled_features

View file

@ -7,8 +7,8 @@ from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission,
from apps.api.serializers.integration_heartbeat import IntegrationHeartBeatSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.heartbeat.models import IntegrationHeartBeat
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.mixins import PublicPrimaryKeyMixin
from common.insight_log import EntityEvent, write_resource_insight_log
class IntegrationHeartBeatView(
@ -45,29 +45,22 @@ class IntegrationHeartBeatView(
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
description = f"Heartbeat for integration {instance.alert_receive_channel.verbal_name} was created"
create_organization_log(
instance.alert_receive_channel.organization,
self.request.user,
OrganizationLogType.TYPE_HEARTBEAT_CREATED,
description,
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.CREATED,
)
def perform_update(self, serializer):
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
alert_receive_channel = serializer.instance.alert_receive_channel
description = (
f"Settings for heartbeat of integration "
f"{alert_receive_channel.verbal_name} was changed "
f"from:\n{old_state}\nto:\n{new_state}"
)
create_organization_log(
alert_receive_channel.organization,
self.request.user,
OrganizationLogType.TYPE_HEARTBEAT_CHANGED,
description,
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
@action(detail=False, methods=["get"])

View file

@ -10,10 +10,10 @@ from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission,
from apps.api.serializers.on_call_shifts import OnCallShiftSerializer, OnCallShiftUpdateSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.schedules.models import CustomOnCallShift
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.mixins import PublicPrimaryKeyMixin, UpdateSerializerMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
from common.api_helpers.utils import get_date_range_from_request
from common.insight_log import EntityEvent, write_resource_insight_log
class OnCallShiftView(PublicPrimaryKeyMixin, UpdateSerializerMixin, ModelViewSet):
@ -52,31 +52,30 @@ class OnCallShiftView(PublicPrimaryKeyMixin, UpdateSerializerMixin, ModelViewSet
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
organization = self.request.auth.organization
user = self.request.user
description = (
f"Custom on-call shift with params: {instance.repr_settings_for_client_side_logging} "
f"was created" # todo
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_ON_CALL_SHIFT_CREATED, description)
def perform_update(self, serializer):
organization = self.request.auth.organization
user = self.request.user
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
description = f"Settings of custom on-call shift was changed " f"from:\n{old_state}\nto:\n{new_state}"
create_organization_log(organization, user, OrganizationLogType.TYPE_ON_CALL_SHIFT_CHANGED, description)
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_destroy(self, instance):
organization = self.request.auth.organization
user = self.request.user
description = (
f"Custom on-call shift " f"with params: {instance.repr_settings_for_client_side_logging} was deleted"
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_ON_CALL_SHIFT_DELETED, description)
instance.delete()
@action(detail=False, methods=["post"])
@ -90,8 +89,6 @@ class OnCallShiftView(PublicPrimaryKeyMixin, UpdateSerializerMixin, ModelViewSet
validated_data = serializer._correct_validated_data(
serializer.validated_data["type"], serializer.validated_data
)
if not validated_data.get("rolling_users"):
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
updated_shift_pk = self.request.data.get("shift_pk")
shift = CustomOnCallShift(**validated_data)

View file

@ -11,7 +11,7 @@ from apps.api.serializers.organization import CurrentOrganizationSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.base.messaging import get_messaging_backend_from_id
from apps.telegram.client import TelegramClient
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.insight_log import EntityEvent, write_resource_insight_log
class CurrentOrganizationView(APIView):
@ -27,16 +27,19 @@ class CurrentOrganizationView(APIView):
def put(self, request):
organization = self.request.auth.organization
old_state = organization.repr_settings_for_client_side_logging
prev_state = organization.insight_logs_serialized
serializer = CurrentOrganizationSerializer(
instance=organization, data=request.data, context={"request": request}
)
serializer.is_valid(raise_exception=True)
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
description = f"Organization settings was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
organization, request.user, OrganizationLogType.TYPE_ORGANIZATION_SETTINGS_CHANGED, description
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
return Response(serializer.data)

View file

@ -1,128 +0,0 @@
from datetime import timedelta
from django.db.models import Q
from django.utils import timezone
from django_filters import rest_framework as filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import mixins, viewsets
from rest_framework.decorators import action
from rest_framework.filters import SearchFilter
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from apps.api.serializers.organization_log_record import OrganizationLogRecordSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.base.models import OrganizationLogRecord
from apps.user_management.models import User
from common.api_helpers.filters import DateRangeFilterMixin, ModelFieldFilterMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
LABEL_CHOICES = [[label, label] for label in OrganizationLogRecord.LABELS]
def get_user_queryset(request):
if request is None:
return User.objects.none()
return User.objects.filter(organization=request.user.organization).distinct()
class OrganizationLogRecordFilter(DateRangeFilterMixin, ModelFieldFilterMixin, filters.FilterSet):
author = filters.ModelMultipleChoiceFilter(
field_name="author",
queryset=get_user_queryset,
to_field_name="public_primary_key",
method=ModelFieldFilterMixin.filter_model_field.__name__,
)
created_at = filters.CharFilter(field_name="created_at", method=DateRangeFilterMixin.filter_date_range.__name__)
labels = filters.MultipleChoiceFilter(choices=LABEL_CHOICES, method="filter_labels")
class Meta:
model = OrganizationLogRecord
fields = ["author", "labels", "created_at"]
def filter_labels(self, queryset, name, value):
if not value:
return queryset
q_objects = Q()
for item in value:
q_objects &= Q(_labels__contains=item)
queryset = queryset.filter(q_objects)
return queryset
class OrganizationLogRecordView(mixins.ListModelMixin, viewsets.GenericViewSet):
authentication_classes = (PluginAuthentication,)
permission_classes = (IsAuthenticated,)
serializer_class = OrganizationLogRecordSerializer
pagination_class = FiftyPageSizePaginator
filter_backends = (
SearchFilter,
DjangoFilterBackend,
)
search_fields = ("description",)
filterset_class = OrganizationLogRecordFilter
def get_queryset(self):
queryset = OrganizationLogRecord.objects.filter(organization=self.request.auth.organization).order_by(
"-created_at"
)
queryset = self.serializer_class.setup_eager_loading(queryset)
return queryset
@action(detail=False, methods=["get"])
def filters(self, request):
filter_name = request.query_params.get("filter_name", None)
api_root = "/api/internal/v1/"
filter_options = [
{
"name": "search",
"type": "search",
},
{
"name": "author",
"type": "options",
"href": api_root + "users/?filters=true&roles=0&roles=1&roles=2",
},
{
"name": "labels",
"type": "options",
"options": [
{
"display_name": label,
"value": label,
}
for label in OrganizationLogRecord.LABELS
],
},
{
"name": "created_at",
"type": "daterange",
"default": f"{timezone.datetime.now() - timedelta(days=7):%Y-%m-%d/{timezone.datetime.now():%Y-%m-%d}}",
},
]
if filter_name is not None:
filter_options = list(filter(lambda f: f["name"].startswith(filter_name), filter_options))
return Response(filter_options)
@action(detail=False, methods=["get"])
def label_options(self, request):
return Response(
[
{
"display_name": label,
"value": label,
}
for label in OrganizationLogRecord.LABELS
]
)

View file

@ -7,8 +7,8 @@ from apps.api.serializers.public_api_token import PublicApiTokenSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.auth_token.constants import MAX_PUBLIC_API_TOKENS_PER_USER
from apps.auth_token.models import ApiAuthToken
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest
from common.insight_log import EntityEvent, write_resource_insight_log
class PublicApiTokenView(
@ -30,10 +30,8 @@ class PublicApiTokenView(
return ApiAuthToken.objects.filter(user=self.request.user, organization=self.request.user.organization)
def destroy(self, request, *args, **kwargs):
user = request.user
instance = self.get_object()
description = f"API token {instance.name} was revoked"
create_organization_log(user.organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_DELETED, description)
write_resource_insight_log(instance=instance, author=request.user, event=EntityEvent.DELETED)
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)
@ -51,5 +49,5 @@ class PublicApiTokenView(
raise BadRequest("Invalid token name")
instance, token = ApiAuthToken.create_auth_token(user, user.organization, token_name)
data = {"id": instance.pk, "token": token, "name": instance.name, "created_at": instance.created_at}
write_resource_insight_log(instance=instance, author=user, event=EntityEvent.CREATED)
return Response(data, status=status.HTTP_201_CREATED)

View file

@ -25,7 +25,6 @@ from apps.auth_token.models import ScheduleExportAuthToken
from apps.schedules.models import OnCallSchedule
from apps.slack.models import SlackChannel
from apps.slack.tasks import update_slack_user_group_for_schedules
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest, Conflict
from common.api_helpers.mixins import (
CreateSerializerMixin,
@ -34,6 +33,7 @@ from common.api_helpers.mixins import (
UpdateSerializerMixin,
)
from common.api_helpers.utils import create_engine_url, get_date_range_from_request
from common.insight_log import EntityEvent, write_resource_insight_log
EVENTS_FILTER_BY_ROTATION = "rotation"
EVENTS_FILTER_BY_OVERRIDE = "override"
@ -136,38 +136,32 @@ class ScheduleView(
return super().get_object()
def perform_create(self, serializer):
schedule = serializer.save()
if schedule.user_group is not None:
update_slack_user_group_for_schedules.apply_async((schedule.user_group.pk,))
organization = self.request.auth.organization
user = self.request.user
description = f"Schedule {schedule.name} was created"
create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_CREATED, description)
serializer.save()
write_resource_insight_log(instance=serializer.instance, author=self.request.user, event=EntityEvent.CREATED)
def perform_update(self, serializer):
organization = self.request.auth.organization
user = self.request.user
old_schedule = serializer.instance
old_state = old_schedule.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
old_user_group = serializer.instance.user_group
updated_schedule = serializer.save()
serializer.save()
if old_user_group is not None:
update_slack_user_group_for_schedules.apply_async((old_user_group.pk,))
if updated_schedule.user_group is not None and updated_schedule.user_group != old_user_group:
update_slack_user_group_for_schedules.apply_async((updated_schedule.user_group.pk,))
new_state = updated_schedule.repr_settings_for_client_side_logging
description = f"Schedule {updated_schedule.name} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_CHANGED, description)
if serializer.instance.user_group is not None and serializer.instance.user_group != old_user_group:
update_slack_user_group_for_schedules.apply_async((serializer.instance.user_group.pk,))
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_destroy(self, instance):
organization = self.request.auth.organization
user = self.request.user
description = f"Schedule {instance.name} was deleted"
create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_DELETED, description)
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
instance.delete()
if instance.user_group is not None:
@ -309,6 +303,7 @@ class ScheduleView(
instance, token = ScheduleExportAuthToken.create_auth_token(
request.user, request.user.organization, schedule
)
write_resource_insight_log(instance=instance, author=self.request.user, event=EntityEvent.CREATED)
except IntegrityError:
raise Conflict("Schedule export token for user already exists")
@ -324,6 +319,7 @@ class ScheduleView(
if self.request.method == "DELETE":
try:
token = ScheduleExportAuthToken.objects.get(user_id=self.request.user.id, schedule_id=schedule.id)
write_resource_insight_log(instance=token, author=self.request.user, event=EntityEvent.DELETED)
token.delete()
except ScheduleExportAuthToken.DoesNotExist:
raise NotFound

View file

@ -6,7 +6,7 @@ from apps.api.permissions import AnyRole, IsAdmin, MethodPermission
from apps.api.serializers.organization_slack_settings import OrganizationSlackSettingsSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.user_management.models import Organization
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.insight_log import EntityEvent, write_resource_insight_log
class SlackTeamSettingsAPIView(views.APIView):
@ -27,14 +27,17 @@ class SlackTeamSettingsAPIView(views.APIView):
def put(self, request):
organization = self.request.auth.organization
old_state = organization.repr_settings_for_client_side_logging
prev_state = organization.insight_logs_serialized
serializer = self.serializer_class(organization, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
description = f"Organization settings was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
organization, request.user, OrganizationLogType.TYPE_ORGANIZATION_SETTINGS_CHANGED, description
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
return Response(serializer.data)

View file

@ -7,8 +7,8 @@ from rest_framework.response import Response
from apps.api.permissions import MODIFY_ACTIONS, READ_ACTIONS, ActionPermission, AnyRole, IsAdmin
from apps.api.serializers.telegram import TelegramToOrganizationConnectorSerializer
from apps.auth_token.auth import PluginAuthentication
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.mixins import PublicPrimaryKeyMixin
from common.insight_log.chatops_insight_logs import ChatOpsEvent, ChatOpsType, write_chatops_insight_log
class TelegramChannelViewSet(
@ -41,8 +41,10 @@ class TelegramChannelViewSet(
def perform_destroy(self, instance):
user = self.request.user
organization = user.organization
description = f"Telegram channel @{instance.channel_name} was disconnected from organization"
create_organization_log(organization, user, OrganizationLogType.TYPE_TELEGRAM_CHANNEL_DISCONNECTED, description)
write_chatops_insight_log(
author=user,
event_name=ChatOpsEvent.CHANNEL_DISCONNECTED,
chatops_type=ChatOpsType.TELEGRAM,
channel_name=instance.channel_name,
)
instance.delete()

View file

@ -40,12 +40,18 @@ from apps.telegram.models import TelegramVerificationCode
from apps.twilioapp.phone_manager import PhoneManager
from apps.twilioapp.twilio_client import twilio_client
from apps.user_management.models import User
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import Conflict
from common.api_helpers.mixins import FilterSerializerMixin, PublicPrimaryKeyMixin
from common.api_helpers.paginators import HundredPageSizePaginator
from common.api_helpers.utils import create_engine_url
from common.constants.role import Role
from common.insight_log import (
ChatOpsEvent,
ChatOpsType,
EntityEvent,
write_chatops_insight_log,
write_resource_insight_log,
)
logger = logging.getLogger(__name__)
@ -121,6 +127,7 @@ class UserView(
"get_verification_code",
"get_backend_verification_code",
"get_telegram_verification_code",
"unlink_slack",
"unlink_telegram",
"unlink_backend",
"make_test_call",
@ -140,6 +147,7 @@ class UserView(
"get_verification_code",
"get_backend_verification_code",
"get_telegram_verification_code",
"unlink_slack",
"unlink_telegram",
"unlink_backend",
"make_test_call",
@ -259,41 +267,37 @@ class UserView(
def verify_number(self, request, pk):
target_user = self.get_object()
code = request.query_params.get("token", None)
old_state = target_user.repr_settings_for_client_side_logging
prev_state = target_user.insight_logs_serialized
phone_manager = PhoneManager(target_user)
verified, error = phone_manager.verify_phone_number(code)
if not verified:
return Response(error, status=status.HTTP_400_BAD_REQUEST)
organization = request.auth.organization
new_state = target_user.repr_settings_for_client_side_logging
description = f"User settings for user {target_user.username} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
organization,
request.user,
OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
description,
new_state = target_user.insight_logs_serialized
write_resource_insight_log(
instance=target_user,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
return Response(status=status.HTTP_200_OK)
@action(detail=True, methods=["put"])
def forget_number(self, request, pk):
target_user = self.get_object()
old_state = target_user.repr_settings_for_client_side_logging
prev_state = target_user.insight_logs_serialized
phone_manager = PhoneManager(target_user)
forget = phone_manager.forget_phone_number()
if forget:
organization = request.auth.organization
new_state = target_user.repr_settings_for_client_side_logging
description = (
f"User settings for user {target_user.username} was changed from:\n{old_state}\nto:\n{new_state}"
)
create_organization_log(
organization,
request.user,
OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
description,
new_state = target_user.insight_logs_serialized
write_resource_insight_log(
instance=target_user,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
return Response(status=status.HTTP_200_OK)
@ -348,29 +352,41 @@ class UserView(
return Response({"telegram_code": str(new_code.uuid), "bot_link": bot_link}, status=status.HTTP_200_OK)
@action(detail=True, methods=["post"])
def unlink_slack(self, request, pk):
user = self.get_object()
user.slack_user_identity = None
user.save(update_fields=["slack_user_identity"])
write_chatops_insight_log(
author=request.user,
event_name=ChatOpsEvent.USER_UNLINKED,
chatops_type=ChatOpsType.SLACK,
linked_user=user.username,
linked_user_id=user.public_primary_key,
)
return Response(status=status.HTTP_200_OK)
@action(detail=True, methods=["post"])
def unlink_telegram(self, request, pk):
user = self.get_object()
TelegramToUserConnector = apps.get_model("telegram", "TelegramToUserConnector")
try:
connector = TelegramToUserConnector.objects.get(user=user)
connector.delete()
write_chatops_insight_log(
author=request.user,
event_name=ChatOpsEvent.USER_UNLINKED,
chatops_type=ChatOpsType.TELEGRAM,
linked_user=user.username,
linked_user_id=user.public_primary_key,
)
except TelegramToUserConnector.DoesNotExist:
return Response(status=status.HTTP_400_BAD_REQUEST)
description = f"Telegram account of user {user.username} was disconnected"
create_organization_log(
user.organization,
user,
OrganizationLogType.TYPE_TELEGRAM_FROM_USER_DISCONNECTED,
description,
)
return Response(status=status.HTTP_200_OK)
@action(detail=True, methods=["post"])
def unlink_backend(self, request, pk):
# TODO: insight logs support
backend_id = request.query_params.get("backend")
backend = get_messaging_backend_from_id(backend_id)
if backend is None:
@ -379,17 +395,15 @@ class UserView(
user = self.get_object()
try:
backend.unlink_user(user)
write_chatops_insight_log(
author=request.user,
event_name=ChatOpsEvent.USER_UNLINKED,
chatops_type=backend.backend_id,
linked_user=user.username,
linked_user_id=user.public_primary_key,
)
except ObjectDoesNotExist:
return Response(status=status.HTTP_400_BAD_REQUEST)
description = f"{backend.label} account of user {user.username} was disconnected"
create_organization_log(
user.organization,
user,
OrganizationLogType.TYPE_MESSAGING_BACKEND_USER_DISCONNECTED,
description,
)
return Response(status=status.HTTP_200_OK)
@action(detail=True, methods=["get", "post", "delete"])
@ -412,6 +426,7 @@ class UserView(
if self.request.method == "POST":
try:
instance, token = UserScheduleExportAuthToken.create_auth_token(user, user.organization)
write_resource_insight_log(instance=instance, author=self.request.user, event=EntityEvent.CREATED)
except IntegrityError:
raise Conflict("Schedule export token for user already exists")
@ -426,10 +441,10 @@ class UserView(
if self.request.method == "DELETE":
try:
token = UserScheduleExportAuthToken.objects.get(user=user)
write_resource_insight_log(instance=token, author=self.request.user, event=EntityEvent.DELETED)
token.delete()
except UserScheduleExportAuthToken.DoesNotExist:
raise NotFound
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=["get", "post", "delete"])

View file

@ -24,9 +24,10 @@ from apps.base.messaging import get_messaging_backend_from_id
from apps.base.models import UserNotificationPolicy
from apps.base.models.user_notification_policy import BUILT_IN_BACKENDS, NotificationChannelAPIOptions
from apps.user_management.models import User
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import UpdateSerializerMixin
from common.exceptions import UserNotificationPolicyCouldNotBeDeleted
from common.insight_log import EntityEvent, write_resource_insight_log
class UserNotificationPolicyView(UpdateSerializerMixin, ModelViewSet):
@ -55,14 +56,14 @@ class UserNotificationPolicyView(UpdateSerializerMixin, ModelViewSet):
except ValueError:
raise BadRequest(detail="Invalid user param")
if user_id is None or user_id == self.request.user.public_primary_key:
queryset = self.model.objects.get_or_create_for_user(user=self.request.user, important=important)
queryset = self.model.objects.filter(user=self.request.user, important=important)
else:
try:
target_user = User.objects.get(public_primary_key=user_id)
except User.DoesNotExist:
raise BadRequest(detail="User does not exist")
queryset = self.model.objects.get_or_create_for_user(user=target_user, important=important)
queryset = self.model.objects.filter(user=target_user, important=important)
queryset = self.serializer_class.setup_eager_loading(queryset)
@ -83,45 +84,45 @@ class UserNotificationPolicyView(UpdateSerializerMixin, ModelViewSet):
return obj
def perform_create(self, serializer):
organization = self.request.auth.organization
user = serializer.validated_data.get("user") or self.request.user
old_state = user.repr_settings_for_client_side_logging
prev_state = user.insight_logs_serialized
serializer.save()
new_state = user.repr_settings_for_client_side_logging
description = f"User settings for user {user.username} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
organization,
self.request.user,
OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
description,
new_state = user.insight_logs_serialized
write_resource_insight_log(
instance=user,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_update(self, serializer):
organization = self.request.auth.organization
user = serializer.validated_data.get("user") or self.request.user
old_state = user.repr_settings_for_client_side_logging
prev_state = user.insight_logs_serialized
serializer.save()
new_state = user.repr_settings_for_client_side_logging
description = f"User settings for user {user.username} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
organization,
self.request.user,
OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
description,
new_state = user.insight_logs_serialized
write_resource_insight_log(
instance=user,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_destroy(self, instance):
organization = self.request.auth.organization
user = instance.user
old_state = user.repr_settings_for_client_side_logging
instance.delete()
new_state = user.repr_settings_for_client_side_logging
description = f"User settings for user {user.username} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
organization,
self.request.user,
OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
description,
prev_state = user.insight_logs_serialized
try:
instance.delete()
except UserNotificationPolicyCouldNotBeDeleted:
raise BadRequest(detail="Can't delete last user notification policy")
new_state = user.insight_logs_serialized
write_resource_insight_log(
instance=user,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
@action(detail=True, methods=["put"])
@ -176,7 +177,8 @@ class UserNotificationPolicyView(UpdateSerializerMixin, ModelViewSet):
continue
# extra backends may be enabled per organization
if notification_channel.name not in BUILT_IN_BACKENDS:
built_in_backend_names = {b[0] for b in BUILT_IN_BACKENDS}
if notification_channel.name not in built_in_backend_names:
extra_messaging_backend = get_messaging_backend_from_id(notification_channel.name)
if extra_messaging_backend is None:
continue

View file

@ -5,7 +5,6 @@ from django.db import models
from apps.auth_token import constants, crypto
from apps.auth_token.models.base_auth_token import BaseAuthToken
from apps.user_management.models import Organization, User
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
class ApiAuthToken(BaseAuthToken):
@ -27,6 +26,22 @@ class ApiAuthToken(BaseAuthToken):
organization=organization,
name=name,
)
description = f"API token {instance.name} was created"
create_organization_log(organization, user, OrganizationLogType.TYPE_API_TOKEN_CREATED, description)
return instance, token_string
# Insight logs
@property
def insight_logs_type_verbal(self):
return "public_api_token"
@property
def insight_logs_verbal(self):
return self.name
@property
def insight_logs_serialized(self):
# API tokens are not modifiable, so return empty dict to implement InsightLoggable interface
return {}
@property
def insight_logs_metadata(self):
return {}

View file

@ -6,7 +6,6 @@ from apps.auth_token import constants, crypto
from apps.auth_token.models.base_auth_token import BaseAuthToken
from apps.schedules.models import OnCallSchedule
from apps.user_management.models import Organization, User
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
class ScheduleExportAuthToken(BaseAuthToken):
@ -38,8 +37,22 @@ class ScheduleExportAuthToken(BaseAuthToken):
organization=organization,
schedule=schedule,
)
description = "Schedule export token was created by user {0} for schedule {1}".format(
user.username, schedule.name
)
create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_EXPORT_TOKEN_CREATED, description)
return instance, token_string
# Insight logs
@property
def insight_logs_type_verbal(self):
return "schedule_export_token"
@property
def insight_logs_verbal(self):
return f"Schedule export token for {self.schedule.insight_logs_verbal}"
@property
def insight_logs_serialized(self):
# Schedule export tokens are not modifiable, return empty dict to implement InsightLoggable interface
return {}
@property
def insight_logs_metadata(self):
return {}

View file

@ -5,7 +5,6 @@ from django.db import models
from apps.auth_token import constants, crypto
from apps.auth_token.models.base_auth_token import BaseAuthToken
from apps.user_management.models import Organization, User
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
class UserScheduleExportAuthToken(BaseAuthToken):
@ -31,6 +30,22 @@ class UserScheduleExportAuthToken(BaseAuthToken):
user=user,
organization=organization,
)
description = "User schedule export token was created by user {0}".format(user.username)
create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_EXPORT_TOKEN_CREATED, description)
return instance, token_string
# Insight logs
@property
def insight_logs_type_verbal(self):
return "user_schedule_export_token"
@property
def insight_logs_verbal(self):
return f"Users chedule export token for {self.user.username}"
@property
def insight_logs_serialized(self):
# Schedule export tokens are not modifiable, return empty dict to implement InsightLoggable interface
return {}
@property
def insight_logs_metadata(self):
return {}

View file

@ -9,6 +9,9 @@ class BaseMessagingBackend:
available_for_use = False
templater = None
def __init__(self, *args, **kwargs):
self.notification_channel_id = kwargs.get("notification_channel_id")
def get_templater_class(self):
if self.templater:
return import_string(self.templater)
@ -46,16 +49,16 @@ class BaseMessagingBackend:
raise NotImplementedError("notify_user method missing implementation")
def load_backend(path):
return import_string(path)()
def load_backend(path, *args, **kwargs):
return import_string(path)(*args, **kwargs)
def get_messaging_backends():
global _messaging_backends
if _messaging_backends is None:
_messaging_backends = {}
for backend_path in settings.EXTRA_MESSAGING_BACKENDS:
backend = load_backend(backend_path)
for (backend_path, notification_channel_id) in settings.EXTRA_MESSAGING_BACKENDS:
backend = load_backend(backend_path, notification_channel_id=notification_channel_id)
_messaging_backends[backend.backend_id] = backend
return _messaging_backends.items()

View file

@ -1,7 +1,6 @@
# Generated by Django 3.2.5 on 2022-05-31 14:46
import apps.base.models.live_setting
import apps.base.models.organization_log_record
import apps.base.models.user_notification_policy
import datetime
import django.core.validators
@ -51,7 +50,7 @@ class Migration(migrations.Migration):
name='OrganizationLogRecord',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('public_primary_key', models.CharField(default=apps.base.models.organization_log_record.generate_public_primary_key_for_organization_log, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])),
('public_primary_key', models.CharField(max_length=20, null=True, default=None)),
('created_at', models.DateTimeField(auto_now_add=True)),
('description', models.TextField(default=None, null=True)),
('_labels', models.JSONField(default=list)),

View file

@ -0,0 +1,16 @@
# Generated by Django 3.2.5 on 2022-08-23 12:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0002_squashed_initial'),
]
operations = [
migrations.DeleteModel(
name='OrganizationLogRecord',
),
]

View file

@ -1,6 +1,5 @@
from .dynamic_setting import DynamicSetting # noqa: F401
from .failed_to_invoke_celery_task import FailedToInvokeCeleryTask # noqa: F401
from .live_setting import LiveSetting # noqa: F401
from .organization_log_record import OrganizationLogRecord # noqa: F401
from .user_notification_policy import UserNotificationPolicy # noqa: F401
from .user_notification_policy_log_record import UserNotificationPolicyLogRecord # noqa: F401

View file

@ -1,32 +1,8 @@
from django.db import IntegrityError, models
from django.db import models
from django.db.models import JSONField
class DynamicSettingsManager(models.Manager):
def get_or_create(self, defaults=None, **kwargs):
"""
Using get_or_create inside celery task sometimes triggers making two identical DynamicSettings.
E.g. https://gitlab.amixr.io/amixr/amixr/issues/843
More info: https://stackoverflow.com/questions/17960593/multipleobjectsreturned-with-get-or-create
Solution is to create UniqueConstraint on DynamicSetting.Name and catch IntegrityError.
Django 3 has built-in check https://github.com/django/django/blob/master/django/db/models/query.py#L571
As for now we are using Django 2.2 which has not.
# TODO: remove this method when we will move to Django 3
So it is overridden get_or_create to catch IntegrityError and just return object in this case.
"""
try:
return super(DynamicSettingsManager, self).get_or_create(defaults=defaults, **kwargs)
except IntegrityError:
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
pass
raise
class DynamicSetting(models.Model):
objects = DynamicSettingsManager()
name = models.CharField(max_length=100)
boolean_value = models.BooleanField(null=True, default=None)
numeric_value = models.IntegerField(null=True, default=None)

View file

@ -54,44 +54,44 @@ class LiveSetting(models.Model):
"SLACK_SIGNING_SECRET": (
"Check <a href='"
"https://grafana.com/docs/grafana-cloud/oncall/open-source/#slack-setup"
"'>instruction</a> for details how to set up Slack. "
"' target='_blank'>instruction</a> for details how to set up Slack. "
"Slack secrets can't be verified on the backend, please try installing the Slack Bot "
"after you update them."
),
"SLACK_CLIENT_OAUTH_SECRET": (
"Check <a href='"
"https://grafana.com/docs/grafana-cloud/oncall/open-source/#slack-setup"
"'>instruction</a> for details how to set up Slack. "
"' target='_blank'>instruction</a> for details how to set up Slack. "
"Slack secrets can't be verified on the backend, please try installing the Slack Bot "
"after you update them."
),
"SLACK_CLIENT_OAUTH_ID": (
"Check <a href='"
"https://grafana.com/docs/grafana-cloud/oncall/open-source/#slack-setup"
"'>instruction</a> for details how to set up Slack. "
"' target='_blank'>instruction</a> for details how to set up Slack. "
"Slack secrets can't be verified on the backend, please try installing the Slack Bot "
"after you update them."
),
"SLACK_INSTALL_RETURN_REDIRECT_HOST": (
"Check <a href='"
"https://grafana.com/docs/grafana-cloud/oncall/open-source/#slack-setup"
"'>instruction</a> for details how to set up Slack. "
"' target='_blank'>instruction</a> for details how to set up Slack. "
"Slack secrets can't be verified on the backend, please try installing the Slack Bot "
"after you update them."
),
"TWILIO_ACCOUNT_SID": (
"Twilio username to allow amixr send sms and make phone calls, "
"<a href='https://support.twilio.com/hc/en-us/articles/223136027-Auth-Tokens-and-How-to-Change-Them'>"
"<a href='https://support.twilio.com/hc/en-us/articles/223136027-Auth-Tokens-and-How-to-Change-Them' target='_blank'>"
"more info</a>."
),
"TWILIO_AUTH_TOKEN": (
"Twilio password to allow amixr send sms and make calls, "
"<a href='https://support.twilio.com/hc/en-us/articles/223136027-Auth-Tokens-and-How-to-Change-Them'>"
"<a href='https://support.twilio.com/hc/en-us/articles/223136027-Auth-Tokens-and-How-to-Change-Them' target='_blank'>"
"more info</a>."
),
"TWILIO_NUMBER": (
"Number from which you will receive calls and SMS, "
"<a href='https://www.twilio.com/docs/phone-numbers'>more info</a>."
"<a href='https://www.twilio.com/docs/phone-numbers' target='_blank'>more info</a>."
),
"TWILIO_VERIFY_SERVICE_SID": (
"SID of Twilio service for number verification. "
@ -100,15 +100,16 @@ class LiveSetting(models.Model):
),
"SENDGRID_API_KEY": (
"Sendgrid api key to send emails, "
"<a href='https://sendgrid.com/docs/ui/account-and-settings/api-keys/'>more info</a>."
"<a href='https://sendgrid.com/docs/ui/account-and-settings/api-keys/' target='_blank'>more info</a>."
),
"SENDGRID_FROM_EMAIL": (
"Address to send emails, <a href='https://sendgrid.com/docs/ui/sending-email/senders/'>" "more info</a>."
"Address to send emails, <a href='https://sendgrid.com/docs/ui/sending-email/senders/' target='_blank'>"
"more info</a>."
),
"SENDGRID_SECRET_KEY": "It is the secret key to secure receiving inbound emails.",
"SENDGRID_INBOUND_EMAIL_DOMAIN": "Domain to receive emails for inbound emails integration.",
"TELEGRAM_TOKEN": (
"Secret token for Telegram bot, you can get one via <a href='https://t.me/BotFather'>BotFather</a>."
"Secret token for Telegram bot, you can get one via <a href='https://t.me/BotFather' target='_blank'>BotFather</a>."
),
"TELEGRAM_WEBHOOK_HOST": (
"Externally available URL for Telegram to make requests. Must use https and ports 80, 88, 443, 8443."
@ -116,7 +117,7 @@ class LiveSetting(models.Model):
"SEND_ANONYMOUS_USAGE_STATS": (
"Grafana OnCall will send anonymous, but uniquely-identifiable usage analytics to Grafana Labs."
" These statistics are sent to https://stats.grafana.org/. For more information on what's sent, look at the "
"<a href='https://github.com/grafana/oncall/blob/dev/engine/apps/oss_installation/usage_stats.py#L29'> source code</a>."
"<a href='https://github.com/grafana/oncall/blob/dev/engine/apps/oss_installation/usage_stats.py#L29' target='_blank'> source code</a>."
),
"GRAFANA_CLOUD_ONCALL_TOKEN": "Secret token for Grafana Cloud OnCall instance.",
"GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED": "Enable heartbeat integration with Grafana Cloud OnCall.",

View file

@ -1,317 +0,0 @@
from django.apps import apps
from django.conf import settings
from django.core.validators import MinLengthValidator
from django.db import models
from django.db.models import JSONField
from emoji import emojize
from apps.alerts.models.maintainable_object import MaintainableObject
from apps.user_management.organization_log_creator import OrganizationLogType
from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
def generate_public_primary_key_for_organization_log():
prefix = "V"
new_public_primary_key = generate_public_primary_key(prefix)
failure_counter = 0
while OrganizationLogRecord.objects.filter(public_primary_key=new_public_primary_key).exists():
new_public_primary_key = increase_public_primary_key_length(
failure_counter=failure_counter, prefix=prefix, model_name="OrganizationLogRecord"
)
failure_counter += 1
return new_public_primary_key
class OrganizationLogRecordManager(models.Manager):
def create(self, organization, author, type, description):
# set labels
labels = OrganizationLogRecord.LABELS_FOR_TYPE[type]
return super().create(
organization=organization,
author=author,
description=description,
_labels=labels,
)
class OrganizationLogRecord(models.Model):
objects = OrganizationLogRecordManager()
LABEL_ORGANIZATION = "organization"
LABEL_SLACK = "slack"
LABEL_TELEGRAM = "telegram"
LABEL_DEFAULT_CHANNEL = "default channel"
LABEL_SLACK_WORKSPACE_CONNECTED = "slack workspace connected"
LABEL_SLACK_WORKSPACE_DISCONNECTED = "slack workspace disconnected"
LABEL_TELEGRAM_CHANNEL_CONNECTED = "telegram channel connected"
LABEL_TELEGRAM_CHANNEL_DISCONNECTED = "telegram channel disconnected"
LABEL_INTEGRATION = "integration"
LABEL_INTEGRATION_CREATED = "integration created"
LABEL_INTEGRATION_DELETED = "integration deleted"
LABEL_INTEGRATION_CHANGED = "integration changed"
LABEL_INTEGRATION_HEARTBEAT = "integration heartbeat"
LABEL_INTEGRATION_HEARTBEAT_CREATED = "integration heartbeat created"
LABEL_INTEGRATION_HEARTBEAT_CHANGED = "integration heartbeat changed"
LABEL_MAINTENANCE = "maintenance"
LABEL_MAINTENANCE_STARTED = "maintenance started"
LABEL_MAINTENANCE_STOPPED = "maintenance stopped"
LABEL_DEBUG = "debug"
LABEL_DEBUG_STARTED = "debug started"
LABEL_DEBUG_STOPPED = "debug stopped"
LABEL_CHANNEL_FILTER = "route"
LABEL_CHANNEL_FILTER_CREATED = "route created"
LABEL_CHANNEL_FILTER_CHANGED = "route changed"
LABEL_CHANNEL_FILTER_DELETED = "route deleted"
LABEL_ESCALATION_CHAIN = "escalation chain"
LABEL_ESCALATION_CHAIN_CREATED = "escalation chain created"
LABEL_ESCALATION_CHAIN_DELETED = "escalation chain deleted"
LABEL_ESCALATION_CHAIN_CHANGED = "escalation chain changed"
LABEL_ESCALATION_POLICY = "escalation policy"
LABEL_ESCALATION_POLICY_CREATED = "escalation policy created"
LABEL_ESCALATION_POLICY_DELETED = "escalation policy deleted"
LABEL_ESCALATION_POLICY_CHANGED = "escalation policy changed"
LABEL_CUSTOM_ACTION = "custom action"
LABEL_CUSTOM_ACTION_CREATED = "custom action created"
LABEL_CUSTOM_ACTION_DELETED = "custom action deleted"
LABEL_CUSTOM_ACTION_CHANGED = "custom action changed"
LABEL_SCHEDULE = "schedule"
LABEL_SCHEDULE_CREATED = "schedule created"
LABEL_SCHEDULE_DELETED = "schedule deleted"
LABEL_SCHEDULE_CHANGED = "schedule changed"
LABEL_ON_CALL_SHIFT = "on-call shift"
LABEL_ON_CALL_SHIFT_CREATED = "on-call shift created"
LABEL_ON_CALL_SHIFT_DELETED = "on-call shift deleted"
LABEL_ON_CALL_SHIFT_CHANGED = "on-call shift changed"
LABEL_USER = "user"
LABEL_USER_CREATED = "user created"
LABEL_USER_SETTINGS_CHANGED = "user changed"
LABEL_ORGANIZATION_SETTINGS_CHANGED = "organization settings changed"
LABEL_TELEGRAM_TO_USER_CONNECTED = "telegram to user connected"
LABEL_TELEGRAM_FROM_USER_DISCONNECTED = "telegram from user disconnected"
LABEL_API_TOKEN = "api token"
LABEL_API_TOKEN_CREATED = "api token created"
LABEL_API_TOKEN_REVOKED = "api token revoked"
LABEL_ESCALATION_CHAIN_COPIED = "escalation chain copied"
LABEL_SCHEDULE_EXPORT_TOKEN = "schedule export token"
LABEL_SCHEDULE_EXPORT_TOKEN_CREATED = "schedule export token created"
LABEL_MESSAGING_BACKEND_CHANNEL_CHANGED = "messaging backend channel changed"
LABEL_MESSAGING_BACKEND_CHANNEL_DELETED = "messaging backend channel deleted"
LABEL_MESSAGING_BACKEND_USER_DISCONNECTED = "messaging backend user disconnected"
LABELS = [
LABEL_ORGANIZATION,
LABEL_SLACK,
LABEL_TELEGRAM,
LABEL_DEFAULT_CHANNEL,
LABEL_SLACK_WORKSPACE_CONNECTED,
LABEL_SLACK_WORKSPACE_DISCONNECTED,
LABEL_TELEGRAM_CHANNEL_CONNECTED,
LABEL_TELEGRAM_CHANNEL_DISCONNECTED,
LABEL_INTEGRATION,
LABEL_INTEGRATION_CREATED,
LABEL_INTEGRATION_DELETED,
LABEL_INTEGRATION_CHANGED,
LABEL_INTEGRATION_HEARTBEAT,
LABEL_INTEGRATION_HEARTBEAT_CREATED,
LABEL_INTEGRATION_HEARTBEAT_CHANGED,
LABEL_MAINTENANCE,
LABEL_MAINTENANCE_STARTED,
LABEL_MAINTENANCE_STOPPED,
LABEL_DEBUG,
LABEL_DEBUG_STARTED,
LABEL_DEBUG_STOPPED,
LABEL_CHANNEL_FILTER,
LABEL_CHANNEL_FILTER_CREATED,
LABEL_CHANNEL_FILTER_CHANGED,
LABEL_CHANNEL_FILTER_DELETED,
LABEL_ESCALATION_CHAIN,
LABEL_ESCALATION_CHAIN_CREATED,
LABEL_ESCALATION_CHAIN_DELETED,
LABEL_ESCALATION_CHAIN_CHANGED,
LABEL_ESCALATION_POLICY,
LABEL_ESCALATION_POLICY_CREATED,
LABEL_ESCALATION_POLICY_DELETED,
LABEL_ESCALATION_POLICY_CHANGED,
LABEL_CUSTOM_ACTION,
LABEL_CUSTOM_ACTION_CREATED,
LABEL_CUSTOM_ACTION_DELETED,
LABEL_CUSTOM_ACTION_CHANGED,
LABEL_SCHEDULE,
LABEL_SCHEDULE_CREATED,
LABEL_SCHEDULE_DELETED,
LABEL_SCHEDULE_CHANGED,
LABEL_ON_CALL_SHIFT,
LABEL_ON_CALL_SHIFT_CREATED,
LABEL_ON_CALL_SHIFT_DELETED,
LABEL_ON_CALL_SHIFT_CHANGED,
LABEL_USER,
LABEL_USER_CREATED,
LABEL_USER_SETTINGS_CHANGED,
LABEL_ORGANIZATION_SETTINGS_CHANGED,
LABEL_TELEGRAM_TO_USER_CONNECTED,
LABEL_TELEGRAM_FROM_USER_DISCONNECTED,
LABEL_API_TOKEN,
LABEL_API_TOKEN_CREATED,
LABEL_API_TOKEN_REVOKED,
LABEL_ESCALATION_CHAIN_COPIED,
LABEL_SCHEDULE_EXPORT_TOKEN,
LABEL_MESSAGING_BACKEND_CHANNEL_CHANGED,
LABEL_MESSAGING_BACKEND_CHANNEL_DELETED,
LABEL_MESSAGING_BACKEND_USER_DISCONNECTED,
]
LABELS_FOR_TYPE = {
OrganizationLogType.TYPE_SLACK_DEFAULT_CHANNEL_CHANGED: [LABEL_SLACK, LABEL_DEFAULT_CHANNEL],
OrganizationLogType.TYPE_SLACK_WORKSPACE_CONNECTED: [LABEL_SLACK, LABEL_SLACK_WORKSPACE_CONNECTED],
OrganizationLogType.TYPE_SLACK_WORKSPACE_DISCONNECTED: [LABEL_SLACK, LABEL_SLACK_WORKSPACE_DISCONNECTED],
OrganizationLogType.TYPE_TELEGRAM_DEFAULT_CHANNEL_CHANGED: [LABEL_TELEGRAM, LABEL_DEFAULT_CHANNEL],
OrganizationLogType.TYPE_TELEGRAM_CHANNEL_CONNECTED: [LABEL_TELEGRAM, LABEL_TELEGRAM_CHANNEL_CONNECTED],
OrganizationLogType.TYPE_TELEGRAM_CHANNEL_DISCONNECTED: [LABEL_TELEGRAM, LABEL_TELEGRAM_CHANNEL_DISCONNECTED],
OrganizationLogType.TYPE_INTEGRATION_CREATED: [LABEL_INTEGRATION, LABEL_INTEGRATION_CREATED],
OrganizationLogType.TYPE_INTEGRATION_DELETED: [LABEL_INTEGRATION, LABEL_INTEGRATION_DELETED],
OrganizationLogType.TYPE_INTEGRATION_CHANGED: [LABEL_INTEGRATION, LABEL_INTEGRATION_CHANGED],
OrganizationLogType.TYPE_HEARTBEAT_CREATED: [LABEL_INTEGRATION_HEARTBEAT, LABEL_INTEGRATION_HEARTBEAT_CREATED],
OrganizationLogType.TYPE_HEARTBEAT_CHANGED: [LABEL_INTEGRATION_HEARTBEAT, LABEL_INTEGRATION_HEARTBEAT_CHANGED],
OrganizationLogType.TYPE_CHANNEL_FILTER_CREATED: [LABEL_CHANNEL_FILTER, LABEL_CHANNEL_FILTER_CREATED],
OrganizationLogType.TYPE_CHANNEL_FILTER_DELETED: [LABEL_CHANNEL_FILTER, LABEL_CHANNEL_FILTER_DELETED],
OrganizationLogType.TYPE_CHANNEL_FILTER_CHANGED: [LABEL_CHANNEL_FILTER, LABEL_CHANNEL_FILTER_CHANGED],
OrganizationLogType.TYPE_ESCALATION_CHAIN_CREATED: [LABEL_ESCALATION_CHAIN, LABEL_ESCALATION_CHAIN_CREATED],
OrganizationLogType.TYPE_ESCALATION_CHAIN_DELETED: [LABEL_ESCALATION_CHAIN, LABEL_ESCALATION_CHAIN_DELETED],
OrganizationLogType.TYPE_ESCALATION_CHAIN_CHANGED: [LABEL_ESCALATION_CHAIN, LABEL_ESCALATION_CHAIN_CHANGED],
OrganizationLogType.TYPE_ESCALATION_STEP_CREATED: [LABEL_ESCALATION_POLICY, LABEL_ESCALATION_POLICY_CREATED],
OrganizationLogType.TYPE_ESCALATION_STEP_DELETED: [LABEL_ESCALATION_POLICY, LABEL_ESCALATION_POLICY_DELETED],
OrganizationLogType.TYPE_ESCALATION_STEP_CHANGED: [LABEL_ESCALATION_POLICY, LABEL_ESCALATION_POLICY_CHANGED],
OrganizationLogType.TYPE_MAINTENANCE_STARTED_FOR_ORGANIZATION: [
LABEL_MAINTENANCE,
LABEL_MAINTENANCE_STARTED,
LABEL_ORGANIZATION,
],
OrganizationLogType.TYPE_MAINTENANCE_STARTED_FOR_INTEGRATION: [
LABEL_MAINTENANCE,
LABEL_MAINTENANCE_STARTED,
LABEL_INTEGRATION,
],
OrganizationLogType.TYPE_MAINTENANCE_STOPPED_FOR_ORGANIZATION: [
LABEL_MAINTENANCE,
LABEL_MAINTENANCE_STOPPED,
LABEL_ORGANIZATION,
],
OrganizationLogType.TYPE_MAINTENANCE_STOPPED_FOR_INTEGRATION: [
LABEL_MAINTENANCE,
LABEL_MAINTENANCE_STOPPED,
LABEL_INTEGRATION,
],
OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STARTED_FOR_ORGANIZATION: [
LABEL_DEBUG,
LABEL_DEBUG_STARTED,
LABEL_ORGANIZATION,
],
OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STARTED_FOR_INTEGRATION: [
LABEL_DEBUG,
LABEL_DEBUG_STARTED,
LABEL_INTEGRATION,
],
OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STOPPED_FOR_ORGANIZATION: [
LABEL_DEBUG,
LABEL_DEBUG_STOPPED,
LABEL_ORGANIZATION,
],
OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STOPPED_FOR_INTEGRATION: [
LABEL_DEBUG,
LABEL_DEBUG_STOPPED,
LABEL_INTEGRATION,
],
OrganizationLogType.TYPE_CUSTOM_ACTION_CREATED: [LABEL_CUSTOM_ACTION, LABEL_CUSTOM_ACTION_CREATED],
OrganizationLogType.TYPE_CUSTOM_ACTION_DELETED: [LABEL_CUSTOM_ACTION, LABEL_CUSTOM_ACTION_DELETED],
OrganizationLogType.TYPE_CUSTOM_ACTION_CHANGED: [LABEL_CUSTOM_ACTION, LABEL_CUSTOM_ACTION_CHANGED],
OrganizationLogType.TYPE_SCHEDULE_CREATED: [LABEL_SCHEDULE, LABEL_SCHEDULE_CREATED],
OrganizationLogType.TYPE_SCHEDULE_DELETED: [LABEL_SCHEDULE, LABEL_SCHEDULE_DELETED],
OrganizationLogType.TYPE_SCHEDULE_CHANGED: [LABEL_SCHEDULE, LABEL_SCHEDULE_CHANGED],
OrganizationLogType.TYPE_ON_CALL_SHIFT_CREATED: [LABEL_ON_CALL_SHIFT, LABEL_ON_CALL_SHIFT_CREATED],
OrganizationLogType.TYPE_ON_CALL_SHIFT_DELETED: [LABEL_ON_CALL_SHIFT, LABEL_ON_CALL_SHIFT_DELETED],
OrganizationLogType.TYPE_ON_CALL_SHIFT_CHANGED: [LABEL_ON_CALL_SHIFT, LABEL_ON_CALL_SHIFT_CHANGED],
OrganizationLogType.TYPE_NEW_USER_ADDED: [LABEL_USER, LABEL_USER_CREATED],
OrganizationLogType.TYPE_ORGANIZATION_SETTINGS_CHANGED: [
LABEL_ORGANIZATION,
LABEL_ORGANIZATION_SETTINGS_CHANGED,
],
OrganizationLogType.TYPE_USER_SETTINGS_CHANGED: [LABEL_USER, LABEL_USER_SETTINGS_CHANGED],
OrganizationLogType.TYPE_TELEGRAM_TO_USER_CONNECTED: [LABEL_TELEGRAM, LABEL_TELEGRAM_TO_USER_CONNECTED],
OrganizationLogType.TYPE_TELEGRAM_FROM_USER_DISCONNECTED: [
LABEL_TELEGRAM,
LABEL_TELEGRAM_FROM_USER_DISCONNECTED,
],
OrganizationLogType.TYPE_API_TOKEN_CREATED: [LABEL_API_TOKEN, LABEL_API_TOKEN_CREATED],
OrganizationLogType.TYPE_API_TOKEN_REVOKED: [LABEL_API_TOKEN, LABEL_API_TOKEN_REVOKED],
OrganizationLogType.TYPE_ESCALATION_CHAIN_COPIED: [LABEL_ESCALATION_CHAIN, LABEL_ESCALATION_CHAIN_COPIED],
OrganizationLogType.TYPE_SCHEDULE_EXPORT_TOKEN_CREATED: [
LABEL_SCHEDULE_EXPORT_TOKEN,
LABEL_SCHEDULE_EXPORT_TOKEN_CREATED,
],
OrganizationLogType.TYPE_MESSAGING_BACKEND_CHANNEL_CHANGED: [LABEL_MESSAGING_BACKEND_CHANNEL_CHANGED],
OrganizationLogType.TYPE_MESSAGING_BACKEND_CHANNEL_DELETED: [LABEL_MESSAGING_BACKEND_CHANNEL_DELETED],
OrganizationLogType.TYPE_MESSAGING_BACKEND_USER_DISCONNECTED: [LABEL_MESSAGING_BACKEND_USER_DISCONNECTED],
}
public_primary_key = models.CharField(
max_length=20,
validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
unique=True,
default=generate_public_primary_key_for_organization_log,
)
organization = models.ForeignKey(
"user_management.Organization", on_delete=models.CASCADE, related_name="log_records"
)
author = models.ForeignKey(
"user_management.User",
on_delete=models.SET_NULL,
related_name="team_log_records",
default=None,
null=True,
)
created_at = models.DateTimeField(auto_now_add=True)
description = models.TextField(null=True, default=None)
_labels = JSONField(default=list)
@property
def labels(self):
return self._labels
@staticmethod
def get_log_type_and_maintainable_object_verbal(maintainable_obj, mode, verbal, stopped=False):
AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
Organization = apps.get_model("user_management", "Organization")
object_verbal_map = {
AlertReceiveChannel: f"integration {emojize(verbal, use_aliases=True)}",
Organization: "organization",
}
if stopped:
log_type_map = {
AlertReceiveChannel: {
MaintainableObject.DEBUG_MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STOPPED_FOR_INTEGRATION,
MaintainableObject.MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_STOPPED_FOR_INTEGRATION,
},
Organization: {
MaintainableObject.DEBUG_MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STOPPED_FOR_ORGANIZATION,
MaintainableObject.MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_STOPPED_FOR_ORGANIZATION,
},
}
else:
log_type_map = {
AlertReceiveChannel: {
MaintainableObject.DEBUG_MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STARTED_FOR_INTEGRATION,
MaintainableObject.MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_STARTED_FOR_INTEGRATION,
},
Organization: {
MaintainableObject.DEBUG_MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_DEBUG_STARTED_FOR_ORGANIZATION,
MaintainableObject.MAINTENANCE: OrganizationLogType.TYPE_MAINTENANCE_STARTED_FOR_ORGANIZATION,
},
}
log_type = log_type_map[type(maintainable_obj)][mode]
object_verbal = object_verbal_map[type(maintainable_obj)]
return log_type, object_verbal

View file

@ -1,16 +1,17 @@
from enum import unique
from typing import Tuple
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator
from django.db import models, transaction
from django.db import models
from django.db.models import Q, QuerySet
from django.utils import timezone
from ordered_model.models import OrderedModel
from apps.base.messaging import get_messaging_backends
from apps.user_management.models import User
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.exceptions import UserNotificationPolicyCouldNotBeDeleted
from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
@ -30,13 +31,13 @@ def generate_public_primary_key_for_notification_policy():
# base supported notification backends
BUILT_IN_BACKENDS = (
"SLACK",
"SMS",
"PHONE_CALL",
"TELEGRAM",
"EMAIL",
"MOBILE_PUSH_GENERAL",
"MOBILE_PUSH_CRITICAL",
("SLACK", 0),
("SMS", 1),
("PHONE_CALL", 2),
("TELEGRAM", 3),
("EMAIL", 4),
("MOBILE_PUSH_GENERAL", 5),
("MOBILE_PUSH_CRITICAL", 6),
)
@ -49,10 +50,10 @@ def _notification_channel_choices():
# use NotificationChannelOptions.AVAILABLE_FOR_USE instead.
supported_backends = list(BUILT_IN_BACKENDS)
for backend_id, _ in get_messaging_backends():
supported_backends.append(backend_id)
for backend_id, backend in get_messaging_backends():
supported_backends.append((backend_id, backend.notification_channel_id))
channels_enum = models.IntegerChoices("NotificationChannel", supported_backends, start=0)
channels_enum = unique(models.IntegerChoices("NotificationChannel", supported_backends))
return channels_enum
@ -69,33 +70,6 @@ def validate_channel_choice(value):
class UserNotificationPolicyQuerySet(models.QuerySet):
def get_or_create_for_user(self, user: User, important: bool) -> "QuerySet[UserNotificationPolicy]":
    """Atomically fetch (or create default) notification policies for a user.

    Locks the user row (SELECT ... FOR UPDATE) inside a transaction so two
    concurrent callers cannot both create default policies for the same user.
    """
    with transaction.atomic():
        User.objects.select_for_update().get(pk=user.pk)
        return self._get_or_create_for_user(user, important)
def _get_or_create_for_user(self, user: User, important: bool) -> "QuerySet[UserNotificationPolicy]":
    """Return existing policies for (user, important), creating defaults if none.

    Policy creation is recorded in the organization log as a user-settings change.
    Expected to run with the user row locked (see ``get_or_create_for_user``).
    """
    notification_policies = super().filter(user=user, important=important)
    # Fast path: the user already has policies of this importance level.
    if notification_policies.exists():
        return notification_policies
    # Capture settings before/after creation so the log shows the transition.
    old_state = user.repr_settings_for_client_side_logging
    if important:
        policies = self.create_important_policies_for_user(user)
    else:
        policies = self.create_default_policies_for_user(user)
    new_state = user.repr_settings_for_client_side_logging
    description = f"User settings for user {user.username} was changed from:\n{old_state}\nto:\n{new_state}"
    create_organization_log(
        user.organization,
        None,  # no explicit author: policies are auto-created by the system
        OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
        description,
    )
    return policies
def create_default_policies_for_user(self, user: User) -> "QuerySet[UserNotificationPolicy]":
model = self.model
@ -206,6 +180,12 @@ class UserNotificationPolicy(OrderedModel):
else:
return "Not set"
def delete(self):
    """Delete this policy unless it is the user's last one of its importance level.

    Raises:
        UserNotificationPolicyCouldNotBeDeleted: when this is the only remaining
            policy with the same ``important`` flag for the user.
    """
    sibling_count = UserNotificationPolicy.objects.filter(important=self.important, user=self.user).count()
    if sibling_count == 1:
        raise UserNotificationPolicyCouldNotBeDeleted("Can't delete last user notification policy")
    super().delete()
class NotificationChannelOptions:
"""

View file

@ -1,6 +1,6 @@
import factory
from apps.base.models import LiveSetting, OrganizationLogRecord, UserNotificationPolicy, UserNotificationPolicyLogRecord
from apps.base.models import LiveSetting, UserNotificationPolicy, UserNotificationPolicyLogRecord
class UserNotificationPolicyFactory(factory.DjangoModelFactory):
@ -13,13 +13,6 @@ class UserNotificationPolicyLogRecordFactory(factory.DjangoModelFactory):
model = UserNotificationPolicyLogRecord
class OrganizationLogRecordFactory(factory.DjangoModelFactory):
description = factory.Faker("sentence", nb_words=4)
class Meta:
model = OrganizationLogRecord
class LiveSettingFactory(factory.DjangoModelFactory):
class Meta:
model = LiveSetting

View file

@ -1,18 +0,0 @@
import pytest
from apps.base.models import OrganizationLogRecord
@pytest.mark.django_db
def test_organization_log_set_general_log_channel(
make_organization_with_slack_team_identity, make_user_for_organization, make_slack_channel
):
organization, slack_team_identity = make_organization_with_slack_team_identity()
user = make_user_for_organization(organization)
slack_channel = make_slack_channel(slack_team_identity)
organization.set_general_log_channel(slack_channel.slack_id, slack_channel.name, user)
assert organization.log_records.filter(
_labels=[OrganizationLogRecord.LABEL_SLACK, OrganizationLogRecord.LABEL_DEFAULT_CHANNEL]
).exists()

View file

@ -9,6 +9,7 @@ from apps.base.models.user_notification_policy import (
validate_channel_choice,
)
from apps.base.tests.messaging_backend import TestOnlyBackend
from common.exceptions import UserNotificationPolicyCouldNotBeDeleted
@pytest.mark.parametrize(
@ -80,3 +81,25 @@ def test_extra_messaging_backends_details():
)
assert validate_channel_choice(channel_choice) is None
@pytest.mark.django_db
def test_unable_to_delete_last_notification_policy(
make_organization,
make_user_for_organization,
make_user_notification_policy,
):
organization = make_organization()
user = make_user_for_organization(organization)
first_policy = make_user_notification_policy(
user, UserNotificationPolicy.Step.NOTIFY, notify_by=UserNotificationPolicy.NotificationChannel.SLACK
)
second_policy = make_user_notification_policy(
user, UserNotificationPolicy.Step.WAIT, wait_delay=timedelta(minutes=5)
)
first_policy.delete()
with pytest.raises(UserNotificationPolicyCouldNotBeDeleted):
second_policy.delete()

View file

@ -1,7 +1,6 @@
import logging
from urllib.parse import urljoin
import humanize
from django.conf import settings
from django.core.validators import MinLengthValidator
from django.db import models, transaction
@ -171,14 +170,6 @@ class IntegrationHeartBeat(BaseHeartBeat):
"alerts.AlertReceiveChannel", on_delete=models.CASCADE, related_name="integration_heartbeat"
)
@property
def repr_settings_for_client_side_logging(self):
"""
Example of execution:
timeout: 30 minutes
"""
return f"timeout: {humanize.naturaldelta(self.timeout_seconds)}"
@property
def is_expired(self):
if self.last_heartbeat_time is not None:
@ -242,3 +233,25 @@ class IntegrationHeartBeat(BaseHeartBeat):
(43200, "12 hours"),
(86400, "1 day"),
)
# Insight logs
@property
def insight_logs_type_verbal(self):
    """Type label under which this heartbeat's events appear in insight logs."""
    return "integration_heartbeat"
@property
def insight_logs_verbal(self):
    """Human-readable name of this heartbeat, derived from its integration."""
    return f"Integration Heartbeat for {self.alert_receive_channel.insight_logs_verbal}"
@property
def insight_logs_serialized(self):
    """Serializable snapshot of the heartbeat settings recorded in insight logs."""
    return {
        "timeout": self.timeout_seconds,
    }
@property
def insight_logs_metadata(self):
    """Extra context (integration name/id) attached to insight log entries."""
    return {
        "integration": self.alert_receive_channel.insight_logs_verbal,
        "integration_id": self.alert_receive_channel.public_primary_key,
    }

View file

@ -6,10 +6,10 @@ from apps.alerts.models import CustomButton
from apps.auth_token.auth import ApiTokenAuthentication
from apps.public_api.serializers.action import ActionCreateSerializer, ActionUpdateSerializer
from apps.public_api.throttlers.user_throttle import UserThrottle
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.filters import ByTeamFilter
from common.api_helpers.mixins import PublicPrimaryKeyMixin, RateLimitHeadersMixin, UpdateSerializerMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
from common.insight_log import EntityEvent, write_resource_insight_log
class ActionView(RateLimitHeadersMixin, PublicPrimaryKeyMixin, UpdateSerializerMixin, ModelViewSet):
@ -36,24 +36,28 @@ class ActionView(RateLimitHeadersMixin, PublicPrimaryKeyMixin, UpdateSerializerM
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
organization = self.request.auth.organization
user = self.request.user
description = f"Custom action {instance.name} was created"
create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_CREATED, description)
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.CREATED,
)
def perform_update(self, serializer):
organization = self.request.auth.organization
user = self.request.user
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
description = f"Custom action {serializer.instance.name} was changed " f"from:\n{old_state}\nto:\n{new_state}"
create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_CHANGED, description)
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_destroy(self, instance):
organization = self.request.auth.organization
user = self.request.user
description = f"Custom action {instance.name} was deleted"
create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_DELETED, description)
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
instance.delete()

View file

@ -8,10 +8,10 @@ from apps.auth_token.auth import ApiTokenAuthentication
from apps.public_api.serializers import EscalationChainSerializer
from apps.public_api.serializers.escalation_chains import EscalationChainUpdateSerializer
from apps.public_api.throttlers.user_throttle import UserThrottle
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.filters import ByTeamFilter
from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
from common.insight_log import EntityEvent, write_resource_insight_log
class EscalationChainView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet):
@ -48,38 +48,29 @@ class EscalationChainView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelVie
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
description = f"Escalation chain {instance.name} was created"
create_organization_log(
instance.organization,
self.request.user,
OrganizationLogType.TYPE_ESCALATION_CHAIN_CREATED,
description,
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.CREATED,
)
def perform_destroy(self, instance):
instance.delete()
description = f"Escalation chain {instance.name} was deleted"
create_organization_log(
instance.organization,
self.request.user,
OrganizationLogType.TYPE_ESCALATION_CHAIN_DELETED,
description,
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
instance.delete()
def perform_update(self, serializer):
instance = serializer.instance
old_state = instance.repr_settings_for_client_side_logging
prev_state = instance.insight_logs_serialized
serializer.save()
new_state = instance.repr_settings_for_client_side_logging
description = f"Escalation chain {instance.name} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
instance.organization,
self.request.user,
OrganizationLogType.TYPE_ESCALATION_CHAIN_CHANGED,
description,
new_state = instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)

View file

@ -7,9 +7,9 @@ from apps.alerts.models import EscalationPolicy
from apps.auth_token.auth import ApiTokenAuthentication
from apps.public_api.serializers import EscalationPolicySerializer, EscalationPolicyUpdateSerializer
from apps.public_api.throttlers.user_throttle import UserThrottle
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
from common.insight_log import EntityEvent, write_resource_insight_log
class EscalationPolicyView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet):
@ -50,36 +50,28 @@ class EscalationPolicyView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelVi
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
organization = self.request.auth.organization
user = self.request.user
escalation_chain = instance.escalation_chain
description = (
f"Escalation step '{instance.step_type_verbal}' with order {instance.order} was created for "
f"escalation chain '{escalation_chain.name}'"
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.CREATED,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_CREATED, description)
def perform_update(self, serializer):
organization = self.request.auth.organization
user = self.request.user
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
escalation_chain = serializer.instance.escalation_chain
description = (
f"Settings for escalation step of escalation chain '{escalation_chain.name}' was changed "
f"from:\n{old_state}\nto:\n{new_state}"
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_CHANGED, description)
def perform_destroy(self, instance):
organization = self.request.auth.organization
user = self.request.user
escalation_chain = instance.escalation_chain
description = (
f"Escalation step '{instance.step_type_verbal}' with order {instance.order} of "
f"escalation chain '{escalation_chain.name}' was deleted"
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_ESCALATION_STEP_DELETED, description)
instance.delete()

View file

@ -8,10 +8,10 @@ from apps.alerts.models import AlertReceiveChannel
from apps.auth_token.auth import ApiTokenAuthentication
from apps.public_api.serializers import IntegrationSerializer, IntegrationUpdateSerializer
from apps.public_api.throttlers.user_throttle import UserThrottle
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.filters import ByTeamFilter
from common.api_helpers.mixins import FilterSerializerMixin, RateLimitHeadersMixin, UpdateSerializerMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
from common.insight_log import EntityEvent, write_resource_insight_log
from .maintaiable_object_mixin import MaintainableObjectMixin
@ -58,20 +58,17 @@ class IntegrationView(
raise NotFound
def perform_update(self, serializer):
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
description = f"Integration settings was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
serializer.instance.organization,
self.request.user,
OrganizationLogType.TYPE_INTEGRATION_CHANGED,
description,
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_destroy(self, instance):
organization = instance.organization
user = self.request.user
description = f"Integration {instance.verbal_name} was deleted"
create_organization_log(organization, user, OrganizationLogType.TYPE_INTEGRATION_DELETED, description)
write_resource_insight_log(instance=instance, author=self.request.user, event=EntityEvent.DELETED)
instance.delete()

View file

@ -7,10 +7,10 @@ from apps.auth_token.auth import ApiTokenAuthentication
from apps.public_api.serializers import CustomOnCallShiftSerializer, CustomOnCallShiftUpdateSerializer
from apps.public_api.throttlers.user_throttle import UserThrottle
from apps.schedules.models import CustomOnCallShift
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.filters import ByTeamFilter
from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
from common.insight_log import EntityEvent, write_resource_insight_log
class CustomOnCallShiftView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet):
@ -52,28 +52,28 @@ class CustomOnCallShiftView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelV
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
organization = self.request.auth.organization
user = self.request.user
description = (
f"Custom on-call shift with params: {instance.repr_settings_for_client_side_logging} " f"was created"
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.CREATED,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_ON_CALL_SHIFT_CREATED, description)
def perform_update(self, serializer):
organization = self.request.auth.organization
user = self.request.user
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
description = f"Settings of custom on-call shift was changed " f"from:\n{old_state}\nto:\n{new_state}"
create_organization_log(organization, user, OrganizationLogType.TYPE_ON_CALL_SHIFT_CHANGED, description)
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_destroy(self, instance):
organization = self.request.auth.organization
user = self.request.user
description = (
f"Custom on-call shift " f"with params: {instance.repr_settings_for_client_side_logging} was deleted"
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_ON_CALL_SHIFT_DELETED, description)
instance.delete()

View file

@ -9,10 +9,11 @@ from apps.base.models import UserNotificationPolicy
from apps.public_api.serializers import PersonalNotificationRuleSerializer, PersonalNotificationRuleUpdateSerializer
from apps.public_api.throttlers.user_throttle import UserThrottle
from apps.user_management.models import User
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
from common.exceptions import UserNotificationPolicyCouldNotBeDeleted
from common.insight_log import EntityEvent, write_resource_insight_log
class PersonalNotificationView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet):
@ -72,45 +73,43 @@ class PersonalNotificationView(RateLimitHeadersMixin, UpdateSerializerMixin, Mod
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_destroy(self, instance):
organization = self.request.auth.organization
user = self.request.user
old_state = user.repr_settings_for_client_side_logging
instance.delete()
new_state = user.repr_settings_for_client_side_logging
description = f"User settings for user {user.username} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
organization,
user,
OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
description,
prev_state = user.insight_logs_serialized
try:
instance.delete()
except UserNotificationPolicyCouldNotBeDeleted:
raise BadRequest(detail="Can't delete last user notification policy")
new_state = user.insight_logs_serialized
write_resource_insight_log(
instance=user,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_create(self, serializer):
organization = self.request.auth.organization
author = self.request.user
user = serializer.validated_data["user"]
old_state = user.repr_settings_for_client_side_logging
prev_state = user.insight_logs_serialized
serializer.save()
new_state = user.repr_settings_for_client_side_logging
description = f"User settings for user {user.username} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
organization,
author,
OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
description,
new_state = user.insight_logs_serialized
write_resource_insight_log(
instance=user,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_update(self, serializer):
organization = self.request.auth.organization
user = self.request.user
old_state = user.repr_settings_for_client_side_logging
prev_state = user.insight_logs_serialized
serializer.save()
new_state = user.repr_settings_for_client_side_logging
description = f"User settings for user {user.username} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
organization,
user,
OrganizationLogType.TYPE_USER_SETTINGS_CHANGED,
description,
new_state = user.insight_logs_serialized
write_resource_insight_log(
instance=user,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)

View file

@ -9,10 +9,10 @@ from apps.alerts.models import ChannelFilter
from apps.auth_token.auth import ApiTokenAuthentication
from apps.public_api.serializers import ChannelFilterSerializer, ChannelFilterUpdateSerializer
from apps.public_api.throttlers.user_throttle import UserThrottle
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin
from common.api_helpers.paginators import TwentyFivePageSizePaginator
from common.insight_log import EntityEvent, write_resource_insight_log
class ChannelFilterView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet):
@ -60,43 +60,30 @@ class ChannelFilterView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewS
if instance.is_default:
raise BadRequest(detail="Unable to delete default filter")
else:
alert_receive_channel = instance.alert_receive_channel
user = self.request.user
route_verbal = instance.verbal_name_for_clients.capitalize()
description = f"{route_verbal} of integration {alert_receive_channel.verbal_name} was deleted"
create_organization_log(
alert_receive_channel.organization,
user,
OrganizationLogType.TYPE_CHANNEL_FILTER_DELETED,
description,
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
alert_receive_channel = instance.alert_receive_channel
user = self.request.user
route_verbal = instance.verbal_name_for_clients.capitalize()
description = f"{route_verbal} was created for integration {alert_receive_channel.verbal_name}"
create_organization_log(
alert_receive_channel.organization,
user,
OrganizationLogType.TYPE_CHANNEL_FILTER_CREATED,
description,
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.CREATED,
)
def perform_update(self, serializer):
organization = self.request.auth.organization
user = self.request.user
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
alert_receive_channel = serializer.instance.alert_receive_channel
route_verbal = serializer.instance.verbal_name_for_clients.capitalize()
description = (
f"Settings for {route_verbal} of integration {alert_receive_channel.verbal_name} "
f"was changed from:\n{old_state}\nto:\n{new_state}"
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
create_organization_log(organization, user, OrganizationLogType.TYPE_CHANNEL_FILTER_CHANGED, description)

View file

@ -13,11 +13,11 @@ from apps.public_api.throttlers.user_throttle import UserThrottle
from apps.schedules.ical_utils import ical_export_from_schedule
from apps.schedules.models import OnCallSchedule, OnCallScheduleWeb
from apps.slack.tasks import update_slack_user_group_for_schedules
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.filters import ByTeamFilter
from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
from common.insight_log import EntityEvent, write_resource_insight_log
class OnCallScheduleChannelView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet):
@ -65,18 +65,17 @@ class OnCallScheduleChannelView(RateLimitHeadersMixin, UpdateSerializerMixin, Mo
if instance.user_group is not None:
update_slack_user_group_for_schedules.apply_async((instance.user_group.pk,))
organization = self.request.auth.organization
user = self.request.user
description = f"Schedule {instance.name} was created"
create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_CREATED, description)
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.CREATED,
)
def perform_update(self, serializer):
if isinstance(serializer.instance, OnCallScheduleWeb):
raise BadRequest(detail="Web schedule update is not enabled through API")
organization = self.request.auth.organization
user = self.request.user
old_state = serializer.instance.repr_settings_for_client_side_logging
prev_state = serializer.instance.insight_logs_serialized
old_user_group = serializer.instance.user_group
updated_schedule = serializer.save()
@ -87,15 +86,21 @@ class OnCallScheduleChannelView(RateLimitHeadersMixin, UpdateSerializerMixin, Mo
if updated_schedule.user_group is not None and updated_schedule.user_group != old_user_group:
update_slack_user_group_for_schedules.apply_async((updated_schedule.user_group.pk,))
new_state = serializer.instance.repr_settings_for_client_side_logging
description = f"Schedule {serializer.instance.name} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_CHANGED, description)
new_state = serializer.instance.insight_logs_serialized
write_resource_insight_log(
instance=serializer.instance,
author=self.request.user,
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def perform_destroy(self, instance):
organization = self.request.auth.organization
user = self.request.user
description = f"Schedule {instance.name} was deleted"
create_organization_log(organization, user, OrganizationLogType.TYPE_SCHEDULE_DELETED, description)
write_resource_insight_log(
instance=instance,
author=self.request.user,
event=EntityEvent.DELETED,
)
instance.delete()

View file

@ -273,7 +273,7 @@ class CustomOnCallShift(models.Model):
return is_finished
def convert_to_ical(self, time_zone="UTC"):
def convert_to_ical(self, time_zone="UTC", allow_empty_users=False):
result = ""
# use shift time_zone if it exists, otherwise use schedule or default time_zone
time_zone = self.time_zone if self.time_zone is not None else time_zone
@ -285,8 +285,10 @@ class CustomOnCallShift(models.Model):
all_rotation_checked = False
users_queue = self.get_rolling_users()
if not users_queue:
if not users_queue and not allow_empty_users:
return result
if not users_queue and allow_empty_users:
users_queue = [[None]]
if self.frequency is None:
users_queue = users_queue[:1]
@ -354,7 +356,10 @@ class CustomOnCallShift(models.Model):
current_event = Event.from_ical(event_ical)
# take shift interval, not event interval. For rolling_users shift it is not the same.
interval = self.interval or 1
current_event["rrule"]["INTERVAL"] = interval
if "rrule" in current_event:
# when triggering shift previews, there could be no rrule information yet
# (e.g. initial empty weekly rotation has no rrule set)
current_event["rrule"]["INTERVAL"] = interval
current_event_start = current_event["DTSTART"].dt
next_event_start = current_event_start
# Calculate the minimum start date for the next event based on rotation frequency. We don't need to do this
@ -415,7 +420,7 @@ class CustomOnCallShift(models.Model):
) or event.start >= self.rotation_start:
next_event = event
break
else:
elif end_date < event.start:
break
else:
if event.start >= next_event_start:
@ -482,7 +487,8 @@ class CustomOnCallShift(models.Model):
rolling_users = self.rolling_users
for users_dict in rolling_users:
users_list = list(users.filter(pk__in=users_dict.keys()))
users_queue.append(users_list)
if users_list:
users_queue.append(users_list)
return users_queue
def add_rolling_users(self, rolling_users_list):
@ -544,3 +550,65 @@ class CustomOnCallShift(models.Model):
name = f"{schedule.name}-{shift_type_name}-{priority_level}-"
name += "".join(random.choice(string.ascii_lowercase) for _ in range(5))
return name
# Insight logs
@property
def insight_logs_type_verbal(self):
    """Type label under which on-call shift events appear in insight logs."""
    return "oncall_shift"
@property
def insight_logs_verbal(self):
    """Human-readable identifier of this shift in insight logs."""
    return self.name
@property
def insight_logs_serialized(self):
users_verbal = []
if self.type == CustomOnCallShift.TYPE_ROLLING_USERS_EVENT:
if self.rolling_users is not None:
for users_dict in self.rolling_users:
users = self.organization.users.filter(public_primary_key__in=users_dict.values())
users_verbal.extend([user.username for user in users])
else:
users = self.users.all()
users_verbal = [user.username for user in users]
result = {
"name": self.name,
"source": self.get_source_display(),
"type": self.get_type_display(),
"users": users_verbal,
"start": self.start.isoformat(),
"duration": self.duration.seconds,
"priority_level": self.priority_level,
}
if self.type not in (CustomOnCallShift.TYPE_SINGLE_EVENT, CustomOnCallShift.TYPE_OVERRIDE):
result["frequency"] = self.get_frequency_display()
result["interval"] = self.interval
result["week_start"] = self.week_start
result["by_day"] = self.by_day
result["by_month"] = self.by_month
result["by_monthday"] = self.by_monthday
result["rotation_start"] = self.rotation_start.isoformat()
if self.until:
result["until"] = self.until.isoformat()
if self.team:
result["team"] = self.team.name
result["team_id"] = self.team.public_primary_key
else:
result["team"] = "General"
if self.time_zone:
result["time_zone"] = self.time_zone
return result
@property
def insight_logs_metadata(self):
    """Extra context (team and owning schedule) attached to insight log entries."""
    result = {}
    if self.team:
        result["team"] = self.team.name
        result["team_id"] = self.team.public_primary_key
    else:
        # Shifts without a team are reported under the implicit "General" team.
        result["team"] = "General"
    if self.schedule:
        result["schedule"] = self.schedule.insight_logs_verbal
        result["schedule_id"] = self.schedule.public_primary_key
    return result

View file

@ -133,36 +133,6 @@ class OnCallSchedule(PolymorphicModel):
class Meta:
unique_together = ("name", "organization")
@property
def repr_settings_for_client_side_logging(self):
"""
Example of execution:
name: test, team: example, url: None
slack reminder settings: notification frequency: Each shift, current shift notification: Yes,
next shift notification: No, action for slot when no one is on-call: Notify all people in the channel
"""
result = f"name: {self.name}, team: {self.team.name if self.team else 'No team'}"
if self.organization.slack_team_identity:
if self.channel:
SlackChannel = apps.get_model("slack", "SlackChannel")
sti = self.organization.slack_team_identity
slack_channel = SlackChannel.objects.filter(slack_team_identity=sti, slack_id=self.channel).first()
if slack_channel:
result += f", slack channel: {slack_channel.name}"
if self.user_group is not None:
result += f", user group: {self.user_group.handle}"
result += (
f"\nslack reminder settings: "
f"notification frequency: {self.get_notify_oncall_shift_freq_display()}, "
f"current shift notification: {'Yes' if self.mention_oncall_start else 'No'}, "
f"next shift notification: {'Yes' if self.mention_oncall_next else 'No'}, "
f"action for slot when no one is on-call: {self.get_notify_empty_oncall_display()}"
)
return result
def get_icalendars(self):
"""Returns list of calendars. Primary calendar should always be the first"""
calendar_primary = None
@ -327,6 +297,10 @@ class OnCallSchedule(PolymorphicModel):
while pending:
ev = pending.pop(0)
if ev["is_empty"]:
# exclude events without active users
continue
if ev["calendar_type"] == OnCallSchedule.TYPE_ICAL_OVERRIDES:
# include overrides from start
resolved.append(ev)
@ -410,6 +384,47 @@ class OnCallSchedule(PolymorphicModel):
events = merged
return events
# Insight logs
@property
def insight_logs_verbal(self):
return self.name
@property
def insight_logs_serialized(self):
result = {
"name": self.name,
}
if self.team:
result["team"] = self.team.name
result["team_id"] = self.team.public_primary_key
else:
result["team"] = "General"
if self.organization.slack_team_identity:
if self.channel:
SlackChannel = apps.get_model("slack", "SlackChannel")
sti = self.organization.slack_team_identity
slack_channel = SlackChannel.objects.filter(slack_team_identity=sti, slack_id=self.channel).first()
if slack_channel:
result["slack_channel"] = slack_channel.name
if self.user_group is not None:
result["user_group"] = self.user_group.handle
result["notification_frequency"] = self.get_notify_oncall_shift_freq_display()
result["current_shift_notification"] = self.mention_oncall_start
result["next_shift_notification"] = self.mention_oncall_next
result["notify_empty_oncall"] = self.get_notify_empty_oncall_display
return result
@property
def insight_logs_metadata(self):
result = {}
if self.team:
result["team"] = self.team.name
result["team_id"] = self.team.public_primary_key
else:
result["team"] = "General"
return result
class OnCallScheduleICal(OnCallSchedule):
# For the ical schedule both primary and overrides icals are imported via ical url
@ -463,13 +478,17 @@ class OnCallScheduleICal(OnCallSchedule):
)
self.save(update_fields=["cached_ical_file_overrides", "prev_ical_file_overrides", "ical_file_error_overrides"])
# Insight logs
@property
def repr_settings_for_client_side_logging(self):
result = super().repr_settings_for_client_side_logging
result += (
f", primary calendar url: {self.ical_url_primary}, " f"overrides calendar url: {self.ical_url_overrides}"
)
return result
def insight_logs_serialized(self):
res = super().insight_logs_serialized
res["primary_calendar_url"] = self.ical_url_primary
res["overrides_calendar_url"] = self.ical_url_overrides
return res
@property
def insight_logs_type_verbal(self):
return "ical_schedule"
class OnCallScheduleCalendar(OnCallSchedule):
@ -543,16 +562,20 @@ class OnCallScheduleCalendar(OnCallSchedule):
return ical
@property
def repr_settings_for_client_side_logging(self):
result = super().repr_settings_for_client_side_logging
result += f", overrides calendar url: {self.ical_url_overrides}"
return result
def insight_logs_type_verbal(self):
return "calendar_schedule"
@property
def insight_logs_serialized(self):
res = super().insight_logs_serialized
res["overrides_calendar_url"] = self.ical_url_overrides
return res
class OnCallScheduleWeb(OnCallSchedule):
time_zone = models.CharField(max_length=100, default="UTC")
def _generate_ical_file_from_shifts(self, qs, extra_shifts=None):
def _generate_ical_file_from_shifts(self, qs, extra_shifts=None, allow_empty_users=False):
"""Generate iCal events file from custom on-call shifts."""
ical = None
if qs.exists() or extra_shifts is not None:
@ -567,7 +590,7 @@ class OnCallScheduleWeb(OnCallSchedule):
ical = ical_file.replace(end_line, "").strip()
ical = f"{ical}\r\n"
for event in itertools.chain(qs.all(), extra_shifts):
ical += event.convert_to_ical(self.time_zone)
ical += event.convert_to_ical(self.time_zone, allow_empty_users=allow_empty_users)
ical += f"{end_line}\r\n"
return ical
@ -638,7 +661,7 @@ class OnCallScheduleWeb(OnCallSchedule):
custom_shift.public_primary_key = updated_shift_pk
qs = qs.exclude(public_primary_key=updated_shift_pk)
ical_file = self._generate_ical_file_from_shifts(qs, extra_shifts=extra_shifts)
ical_file = self._generate_ical_file_from_shifts(qs, extra_shifts=extra_shifts, allow_empty_users=True)
original_value = getattr(self, ical_attr)
_invalidate_cache(self, ical_property)
@ -653,3 +676,14 @@ class OnCallScheduleWeb(OnCallSchedule):
setattr(self, ical_attr, original_value)
return shift_events, final_events
# Insight logs
@property
def insight_logs_type_verbal(self):
return "web_schedule"
@property
def insight_logs_serialized(self):
res = super().insight_logs_serialized
res["time_zone"] = self.time_zone
return res

View file

@ -44,17 +44,22 @@ def refresh_ical_file(schedule_pk):
if schedule.cached_ical_file_primary is not None:
if schedule.prev_ical_file_primary is None:
run_task_primary = True
task_logger.info(f"run_task_primary {schedule_pk} {run_task_primary} prev_ical_file_primary is None")
else:
run_task_primary = not is_icals_equal(schedule.cached_ical_file_primary, schedule.prev_ical_file_primary)
task_logger.info(f"run_task_primary {schedule_pk} {run_task_primary} icals not equal")
run_task_overrides = False
if schedule.cached_ical_file_overrides is not None:
if schedule.prev_ical_file_overrides is None:
run_task_overrides = True
task_logger.info(f"run_task_overrides {schedule_pk} {run_task_primary} prev_ical_file_overrides is None")
else:
run_task_overrides = not is_icals_equal(
schedule.cached_ical_file_overrides, schedule.prev_ical_file_overrides
)
task_logger.info(f"run_task_overrides {schedule_pk} {run_task_primary} icals not equal")
run_task = run_task_primary or run_task_overrides
if run_task:
notify_about_empty_shifts_in_schedule.apply_async((schedule_pk,))
notify_about_gaps_in_schedule.apply_async((schedule_pk,))

View file

@ -574,6 +574,72 @@ def test_rolling_users_with_diff_start_and_rotation_start_weekly(
assert len(users_on_call) == 0
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_weekly_by_day_weekend(
make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
organization, user_1 = make_organization_and_user()
user_2 = make_user_for_organization(organization)
user_3 = make_user_for_organization(organization)
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
now = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
today_weekday = now.weekday()
delta_days = (0 - today_weekday) % 7 + (7 if today_weekday == 0 else 0)
next_week_monday = now + timezone.timedelta(days=delta_days)
# SAT, SUN
weekdays = [5, 6]
by_day = [CustomOnCallShift.ICAL_WEEKDAY_MAP[day] for day in weekdays]
data = {
"priority_level": 1,
"start": now,
"week_start": 0,
"rotation_start": next_week_monday,
"duration": timezone.timedelta(seconds=1800),
"frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
"schedule": schedule,
"until": next_week_monday + timezone.timedelta(days=30, minutes=1),
"by_day": by_day,
}
rolling_users = [[user_1], [user_2], [user_3]]
on_call_shift = make_on_call_shift(
organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
)
on_call_shift.add_rolling_users(rolling_users)
first_sat = next_week_monday + timezone.timedelta(days=5) + timezone.timedelta(minutes=5)
user_1_on_call_dates = [first_sat + timezone.timedelta(days=15)]
user_2_on_call_dates = [first_sat, first_sat + timezone.timedelta(days=22)]
user_3_on_call_dates = [first_sat + timezone.timedelta(days=7), first_sat + timezone.timedelta(days=8)]
nobody_on_call_dates = [
now, # less than rotation start
first_sat - timezone.timedelta(days=7), # before rotation start
first_sat + timezone.timedelta(days=9), # weekday value not in by_day
first_sat + timezone.timedelta(days=30), # higher than until
]
for dt in user_1_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 1
assert user_1 in users_on_call
for dt in user_2_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 1
assert user_2 in users_on_call
for dt in user_3_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 1
assert user_3 in users_on_call
for dt in nobody_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 0
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_weekly_by_day(
make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule

View file

@ -552,6 +552,71 @@ def test_preview_shift(make_organization, make_user_for_organization, make_sched
assert schedule._ical_file_primary == schedule_primary_ical
@pytest.mark.django_db
def test_preview_shift_no_user(make_organization, make_user_for_organization, make_schedule, make_on_call_shift):
organization = make_organization()
schedule = make_schedule(
organization,
schedule_class=OnCallScheduleWeb,
name="test_web_schedule",
)
now = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
start_date = now - timezone.timedelta(days=7)
schedule_primary_ical = schedule._ical_file_primary
# proposed shift
new_shift = CustomOnCallShift(
type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT,
organization=organization,
schedule=schedule,
name="testing",
start=start_date + timezone.timedelta(hours=12),
rotation_start=start_date + timezone.timedelta(hours=12),
duration=timezone.timedelta(seconds=3600),
frequency=CustomOnCallShift.FREQUENCY_DAILY,
priority_level=2,
rolling_users=[],
)
rotation_events, final_events = schedule.preview_shift(new_shift, "UTC", start_date, days=1)
# check rotation events
expected_rotation_events = [
{
"calendar_type": OnCallSchedule.TYPE_ICAL_PRIMARY,
"start": new_shift.start,
"end": new_shift.start + new_shift.duration,
"all_day": False,
"is_override": False,
"is_empty": True,
"is_gap": False,
"priority_level": None,
"missing_users": [],
"users": [],
"shift": {"pk": new_shift.public_primary_key},
"source": "api",
}
]
assert rotation_events == expected_rotation_events
expected_events = []
returned_events = [
{
"end": e["end"],
"start": e["start"],
"user": e["users"][0]["display_name"] if e["users"] else None,
"is_empty": e["is_empty"],
}
for e in final_events
if not e["is_override"] and not e["is_gap"]
]
assert returned_events == expected_events
# final ical schedule didn't change
assert schedule._ical_file_primary == schedule_primary_ical
@pytest.mark.django_db
def test_preview_override_shift(make_organization, make_user_for_organization, make_schedule, make_on_call_shift):
organization = make_organization()

View file

@ -137,8 +137,15 @@ class SlackMessage(models.Model):
else:
text = "{}\nInviting {} to look at incident.".format(alert_group.long_verbose_name, user_verbal)
attachments = [
{"color": "#c6c000", "callback_id": "alert", "text": text}, # yellow
blocks = [
{
"type": "section",
"block_id": "alert",
"text": {
"type": "mrkdwn",
"text": text,
},
}
]
sc = SlackClientWithErrorHandling(self.slack_team_identity.bot_access_token)
channel_id = slack_message.channel_id
@ -147,7 +154,8 @@ class SlackMessage(models.Model):
result = sc.api_call(
"chat.postMessage",
channel=channel_id,
attachments=attachments,
text=text,
blocks=blocks,
thread_ts=slack_message.slack_id,
unfurl_links=True,
)

View file

@ -7,8 +7,8 @@ from django.db.models import JSONField
from apps.slack.constants import SLACK_INVALID_AUTH_RESPONSE, SLACK_WRONG_TEAM_NAMES
from apps.slack.slack_client import SlackClientWithErrorHandling
from apps.slack.slack_client.exceptions import SlackAPIException, SlackAPITokenException
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.constants.role import Role
from common.insight_log.chatops_insight_logs import ChatOpsEvent, ChatOpsType, write_chatops_insight_log
logger = logging.getLogger(__name__)
@ -63,8 +63,9 @@ class SlackTeamIdentity(models.Model):
self.cached_reinstall_data = None
self.installed_via_granular_permissions = True
self.save()
description = f"Slack workspace {self.cached_name} was connected to organization"
create_organization_log(organization, user, OrganizationLogType.TYPE_SLACK_WORKSPACE_CONNECTED, description)
write_chatops_insight_log(
author=user, event_name=ChatOpsEvent.WORKSPACE_CONNECTED, chatops_type=ChatOpsType.SLACK
)
def get_cached_channels(self, search_term=None, slack_id=None):
queryset = self.cached_channels

View file

@ -6,8 +6,8 @@ from jinja2 import TemplateSyntaxError
from rest_framework.response import Response
from apps.slack.scenarios import scenario_step
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.constants.role import Role
from common.insight_log import EntityEvent, write_resource_insight_log
from common.jinja_templater import jinja_template_env
from .step_mixins import CheckAlertIsUnarchivedMixin, IncidentActionsAccessControlMixin
@ -233,7 +233,7 @@ class UpdateAppearanceStep(scenario_step.ScenarioStep):
alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).select_for_update().get()
integration = alert_group.channel.integration
alert_receive_channel = alert_group.channel
old_state = alert_receive_channel.repr_settings_for_client_side_logging
prev_state = alert_receive_channel.insight_logs_serialized
for templatizable_attr in ["title", "message", "image_url"]:
for notification_channel in ["slack", "web", "sms", "phone_call", "email", "telegram"]:
@ -308,12 +308,15 @@ class UpdateAppearanceStep(scenario_step.ScenarioStep):
headers={"content-type": "application/json"},
)
new_state = alert_receive_channel.repr_settings_for_client_side_logging
new_state = alert_receive_channel.insight_logs_serialized
if new_state != old_state:
description = f"Integration settings was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
self.organization, self.user, OrganizationLogType.TYPE_INTEGRATION_CHANGED, description
if new_state != prev_state:
write_resource_insight_log(
instance=alert_receive_channel,
author=slack_user_identity.get_user(alert_receive_channel.organization),
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
attachments = alert_group.render_slack_attachments()

View file

@ -192,6 +192,7 @@ class AlertShootingStep(scenario_step.ScenarioStep):
self._slack_client.api_call(
"chat.postMessage",
channel=channel_id,
text=text,
attachments=[],
thread_ts=alert_group.slack_message.slack_id,
mrkdwn=True,
@ -480,10 +481,8 @@ class AttachGroupStep(
alert_group = log_record.alert_group
if log_record.type == AlertGroupLogRecord.TYPE_ATTACHED and log_record.alert_group.is_maintenance_incident:
attachments = [
{"callback_id": "alert", "text": "{}".format(log_record.rendered_log_line_action(for_slack=True))},
]
self._publish_message_to_thread(alert_group, attachments)
text = f"{log_record.rendered_log_line_action(for_slack=True)}"
self.publish_message_to_thread(alert_group, text=text)
if log_record.type == AlertGroupLogRecord.TYPE_FAILED_ATTACHMENT:
ephemeral_text = log_record.rendered_log_line_action(for_slack=True)
@ -612,7 +611,7 @@ class CustomButtonProcessStep(
custom_button = log_record.custom_button
debug_message = ""
if not log_record.step_specific_info["is_request_successful"]:
with suppress(TemplateError):
with suppress(TemplateError, json.JSONDecodeError):
post_kwargs = custom_button.build_post_kwargs(log_record.alert_group.alerts.first())
curl_request = render_curl_command(log_record.custom_button.webhook, "POST", post_kwargs)
debug_message = f"```{curl_request}```"
@ -629,9 +628,9 @@ class CustomButtonProcessStep(
f"according to escalation policy with the result `{result_message}`"
)
attachments = [
{"callback_id": "alert", "text": debug_message, "footer": text},
{"callback_id": "alert", "text": debug_message},
]
self._publish_message_to_thread(alert_group, attachments)
self.publish_message_to_thread(alert_group, attachments=attachments, text=text)
class ResolveGroupStep(
@ -763,23 +762,27 @@ class UnAcknowledgeGroupStep(
message_attachments = [
{
"callback_id": "alert",
"text": f"{user_verbal} hasn't responded to an acknowledge timeout reminder."
f" Incident is unacknowledged automatically",
"text": "",
"footer": "Escalation started again...",
},
]
text = (
f"{user_verbal} hasn't responded to an acknowledge timeout reminder."
f" Incident is unacknowledged automatically"
)
if alert_group.slack_message.ack_reminder_message_ts:
try:
self._slack_client.api_call(
"chat.update",
channel=channel_id,
ts=alert_group.slack_message.ack_reminder_message_ts,
text=text,
attachments=message_attachments,
)
except SlackAPIException as e:
# post to thread if ack reminder message was deleted in Slack
if e.response["error"] == "message_not_found":
self._publish_message_to_thread(alert_group, message_attachments)
self.publish_message_to_thread(alert_group, attachments=message_attachments, text=text)
elif e.response["error"] == "account_inactive":
logger.info(
f"Skip unacknowledge slack message for alert_group {alert_group.pk} due to account_inactive"
@ -787,7 +790,7 @@ class UnAcknowledgeGroupStep(
else:
raise
else:
self._publish_message_to_thread(alert_group, message_attachments)
self.publish_message_to_thread(alert_group, attachments=message_attachments, text=text)
self._update_slack_message(alert_group)
logger.debug(f"Finished process_signal in UnAcknowledgeGroupStep for alert_group {alert_group.pk}")
@ -806,18 +809,12 @@ class AcknowledgeConfirmationStep(AcknowledgeGroupStep):
if alert_group.acknowledged_by == AlertGroup.USER:
if self.user == alert_group.acknowledged_by_user:
user_verbal = alert_group.acknowledged_by_user.get_user_verbal_for_team_for_slack()
attachments = [
{
"color": "#c6c000",
"callback_id": "alert",
"text": f"{user_verbal} is confirmed to be working on this incident",
},
]
text = f"{user_verbal} confirmed that the incident is still acknowledged"
self._slack_client.api_call(
"chat.update",
channel=channel,
ts=message_ts,
attachments=attachments,
text=text,
)
alert_group.acknowledged_by_confirmed = datetime.utcnow()
alert_group.save(update_fields=["acknowledged_by_confirmed"])
@ -830,18 +827,12 @@ class AcknowledgeConfirmationStep(AcknowledgeGroupStep):
)
elif alert_group.acknowledged_by == AlertGroup.SOURCE:
user_verbal = self.user.get_user_verbal_for_team_for_slack()
attachments = [
{
"color": "#c6c000",
"callback_id": "alert",
"text": f"{user_verbal} is confirmed to be working on this incident",
},
]
text = f"{user_verbal} confirmed that the incident is still acknowledged"
self._slack_client.api_call(
"chat.update",
channel=channel,
ts=message_ts,
attachments=attachments,
text=text,
)
alert_group.acknowledged_by_confirmed = datetime.utcnow()
alert_group.save(update_fields=["acknowledged_by_confirmed"])
@ -865,12 +856,13 @@ class AcknowledgeConfirmationStep(AcknowledgeGroupStep):
alert_group = log_record.alert_group
channel_id = alert_group.slack_message.channel_id
user_verbal = log_record.author.get_user_verbal_for_team_for_slack(mention=True)
text = f"{user_verbal}, please confirm that you're still working on this incident."
if alert_group.channel.organization.unacknowledge_timeout != Organization.UNACKNOWLEDGE_TIMEOUT_NEVER:
attachments = [
{
"fallback": "Are you still working on this incident?",
"text": f"{user_verbal}, please confirm that you're still working on this incident.",
"text": text,
"callback_id": "alert",
"attachment_type": "default",
"footer": "This is a reminder that the incident is still acknowledged"
@ -896,6 +888,7 @@ class AcknowledgeConfirmationStep(AcknowledgeGroupStep):
response = self._slack_client.api_call(
"chat.postMessage",
channel=channel_id,
text=text,
attachments=attachments,
thread_ts=alert_group.slack_message.slack_id,
)
@ -932,14 +925,8 @@ class AcknowledgeConfirmationStep(AcknowledgeGroupStep):
alert_group.slack_message.ack_reminder_message_ts = response["ts"]
alert_group.slack_message.save(update_fields=["ack_reminder_message_ts"])
else:
attachments = [
{
"callback_id": "alert",
"text": f"This is a reminder that the incident is still acknowledged by {user_verbal}"
f" and not resolved.",
},
]
self._publish_message_to_thread(alert_group, attachments)
text = f"This is a reminder that the incident is still acknowledged by {user_verbal}"
self.publish_message_to_thread(alert_group, text=text)
class WipeGroupStep(scenario_step.ScenarioStep):
@ -953,15 +940,8 @@ class WipeGroupStep(scenario_step.ScenarioStep):
def process_signal(self, log_record):
alert_group = log_record.alert_group
user_verbal = log_record.author.get_user_verbal_for_team_for_slack()
attachments = [
{
"color": "warning",
"callback_id": "alert",
"footer": "Incident wiped",
"text": "Wiped by {}.".format(user_verbal),
},
]
self._publish_message_to_thread(alert_group, attachments)
text = f"Wiped by {user_verbal}"
self.publish_message_to_thread(alert_group, text=text)
self._update_slack_message(alert_group)
@ -1069,21 +1049,15 @@ class UpdateLogReportMessageStep(scenario_step.ScenarioStep):
logger.info(f"Cannot post log message for alert_group {alert_group.pk} because SlackMessage doesn't exist")
return None
attachments = [
{
"text": "Building escalation plan... :thinking_face:",
}
]
text = ("Building escalation plan... :thinking_face:",)
slack_log_message = alert_group.slack_log_message
if slack_log_message is None:
logger.debug(f"Start posting new log message for alert_group {alert_group.pk}")
try:
result = self._slack_client.api_call(
"chat.postMessage",
channel=slack_message.channel_id,
thread_ts=slack_message.slack_id,
attachments=attachments,
"chat.postMessage", channel=slack_message.channel_id, thread_ts=slack_message.slack_id, text=text
)
except SlackAPITokenException as e:
print(e)
@ -1148,6 +1122,7 @@ class UpdateLogReportMessageStep(scenario_step.ScenarioStep):
self._slack_client.api_call(
"chat.update",
channel=slack_message.channel_id,
text="Alert Group log",
ts=slack_log_message.slack_id,
attachments=attachments,
)

View file

@ -34,14 +34,3 @@ class EscalationDeliveryStep(scenario_step.ScenarioStep):
user_mention_as = user_verbal
notify_by = " by {}".format(UserNotificationPolicy.NotificationChannel(notification_channel).label)
return "Inviting {}{} to look at incident.".format(user_mention_as, notify_by)
def notify_thread_about_action(self, alert_group, text, footer=None, color=None):
attachments = [
{
"callback_id": "alert",
"footer": footer,
"text": text,
"color": color,
},
]
self._publish_message_to_thread(alert_group, attachments)

View file

@ -24,7 +24,6 @@ class NotificationDeliveryStep(scenario_step.ScenarioStep):
self.post_message_to_channel(
f"Attempt to send an SMS to {user_verbal_with_mention} has been failed due to a plan limit",
alert_group.slack_message.channel_id,
color="red",
)
elif (
log_record.notification_error_code
@ -33,7 +32,6 @@ class NotificationDeliveryStep(scenario_step.ScenarioStep):
self.post_message_to_channel(
f"Attempt to call to {user_verbal_with_mention} has been failed due to a plan limit",
alert_group.slack_message.channel_id,
color="red",
)
elif (
log_record.notification_error_code
@ -42,7 +40,6 @@ class NotificationDeliveryStep(scenario_step.ScenarioStep):
self.post_message_to_channel(
f"Failed to send email to {user_verbal_with_mention}. Exceeded limit for mails",
alert_group.slack_message.channel_id,
color="red",
)
elif (
log_record.notification_error_code
@ -52,26 +49,31 @@ class NotificationDeliveryStep(scenario_step.ScenarioStep):
self.post_message_to_channel(
f"Failed to send an SMS to {user_verbal_with_mention}. Phone number is not verified",
alert_group.slack_message.channel_id,
color="red",
)
elif log_record.notification_channel == UserNotificationPolicy.NotificationChannel.PHONE_CALL:
self.post_message_to_channel(
f"Failed to call to {user_verbal_with_mention}. Phone number is not verified",
alert_group.slack_message.channel_id,
color="red",
)
def post_message_to_channel(self, text, channel, color=None, footer=None):
color_id = self.get_color_id(color)
attachments = [
{"color": color_id, "callback_id": "alert", "footer": footer, "text": text},
def post_message_to_channel(self, text, channel):
blocks = [
{
"type": "section",
"block_id": "alert",
"text": {
"type": "mrkdwn",
"text": text,
},
},
]
try:
# TODO: slack-onprem, check exceptions
self._slack_client.api_call(
"chat.postMessage",
channel=channel,
attachments=attachments,
text=text,
blocks=blocks,
unfurl_links=True,
)
except SlackAPITokenException as e:

View file

@ -287,7 +287,7 @@ class ScenarioStep(object):
raise e
logger.info(f"Finished _update_slack_message for alert_group {alert_group.pk}")
def _publish_message_to_thread(self, alert_group, attachments, mrkdwn=True, unfurl_links=True):
def publish_message_to_thread(self, alert_group, attachments=[], mrkdwn=True, unfurl_links=True, text=None):
# TODO: refactor checking the possibility of sending message to slack
# do not try to post message to slack if integration is rate limited
if alert_group.channel.is_rate_limited_in_slack:
@ -300,6 +300,7 @@ class ScenarioStep(object):
result = self._slack_client.api_call(
"chat.postMessage",
channel=channel_id,
text=text,
attachments=attachments,
thread_ts=slack_message.slack_id,
mrkdwn=mrkdwn,

View file

@ -6,7 +6,7 @@ from django.utils import timezone
from apps.schedules.models import OnCallSchedule
from apps.slack.scenarios import scenario_step
from apps.slack.utils import format_datetime_to_slack
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.insight_log import EntityEvent, write_resource_insight_log
class EditScheduleShiftNotifyStep(scenario_step.ScenarioStep):
@ -57,16 +57,16 @@ class EditScheduleShiftNotifyStep(scenario_step.ScenarioStep):
private_metadata = json.loads(payload["view"]["private_metadata"])
schedule_id = private_metadata["schedule_id"]
schedule = OnCallSchedule.objects.get(pk=schedule_id)
old_state = schedule.repr_settings_for_client_side_logging
prev_state = schedule.insight_logs_serialized
setattr(schedule, action["block_id"], int(action["selected_option"]["value"]))
schedule.save()
new_state = schedule.repr_settings_for_client_side_logging
description = f"Schedule {schedule.name} was changed from:\n{old_state}\nto:\n{new_state}"
create_organization_log(
schedule.organization,
slack_user_identity.get_user(schedule.organization),
OrganizationLogType.TYPE_SCHEDULE_CHANGED,
description,
new_state = schedule.insight_logs_serialized
write_resource_insight_log(
instance=schedule,
author=slack_user_identity.get_user(schedule.organization),
event=EntityEvent.UPDATED,
prev_state=prev_state,
new_state=new_state,
)
def get_modal_blocks(self, schedule_id):

Some files were not shown because too many files have changed in this diff Show more