Merge branch 'dev' into package-updates

This commit is contained in:
Rares Mardare 2022-09-05 15:25:09 +03:00
commit 6c2e70dafe
64 changed files with 1188 additions and 458 deletions

View file

@ -5,7 +5,7 @@ name: Build and Release
steps:
- name: Build Plugin
image: node:14.6.0-stretch
image: node:14.17.0-stretch
commands:
- apt-get update
- apt-get --assume-yes install jq
@ -16,7 +16,7 @@ steps:
- ls ./
- name: Sign and Package Plugin
image: node:14.6.0-stretch
image: node:14.17.0-stretch
environment:
GRAFANA_API_KEY:
from_secret: gcom_plugin_publisher_api_key
@ -158,18 +158,11 @@ trigger:
---
kind: pipeline
type: docker
name: OSS Release
name: OSS plugin release
steps:
- name: Check Promote
image: alpine
commands:
- if [ -z "$DRONE_DEPLOY_TO" ]; then echo "Missing DRONE_DEPLOY_TO (Target)"; exit 1; fi
- if [ -z "$DRONE_TAG" ]; then echo "Missing DRONE_TAG"; exit 1; fi
- echo Promoting $DRONE_TAG to $DRONE_DEPLOY_TO
- name: Build Plugin
image: node:14.6.0-stretch
- name: build plugin
image: node:14.17.0-stretch
commands:
- apt-get update
- apt-get --assume-yes install jq
@ -178,23 +171,14 @@ steps:
- yarn --network-timeout 500000
- yarn build
- ls ./
depends_on:
- Check Promote
when:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
- name: Sign and Package Plugin
image: node:14.6.0-stretch
- name: sign and package plugin
image: node:14.17.0-stretch
environment:
GRAFANA_API_KEY:
from_secret: gcom_plugin_publisher_api_key
depends_on:
- Build Plugin
- build plugin
commands:
- apt-get update
- apt-get install zip
@ -206,7 +190,7 @@ steps:
- zip -r grafana-oncall-app.zip ./grafana-oncall-app
- if [ -z "$DRONE_TAG" ]; then echo "No tag, skipping archive"; else cp grafana-oncall-app.zip grafana-oncall-app-${DRONE_TAG}.zip; fi
- name: Publish Plugin to grafana.com (release)
- name: publish plugin to grafana.com (release)
image: curlimages/curl:7.73.0
environment:
GRAFANA_API_KEY:
@ -214,32 +198,36 @@ steps:
commands:
- "curl -f -s -H \"Authorization: Bearer $${GRAFANA_API_KEY}\" -d \"download[any][url]=https://storage.googleapis.com/grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip\" -d \"download[any][md5]=$$(curl -sL https://storage.googleapis.com/grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip | md5sum | cut -d' ' -f1)\" -d url=https://github.com/grafana/oncall/grafana-plugin https://grafana.com/api/plugins"
depends_on:
- Sign and Package Plugin
- sign and package plugin
- name: Image Tag
trigger:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
---
kind: pipeline
type: docker
name: OSS engine release (amd64)
platform:
os: linux
arch: amd64
steps:
- name: set engine version
image: alpine
commands:
- apk add --no-cache bash git sed
- git fetch origin --tags
- chmod +x ./tools/image-tag.sh
- echo $(./tools/image-tag.sh)
- echo $(./tools/image-tag.sh) > .tags
- apk add --no-cache bash sed
- if [ -z "$DRONE_TAG" ]; then echo "No tag, not modifying version"; else sed "0,/VERSION.*/ s/VERSION.*/VERSION = \"${DRONE_TAG}\"/g" engine/settings/base.py > engine/settings/base.temp && mv engine/settings/base.temp engine/settings/base.py; fi
- cat engine/settings/base.py | grep VERSION | head -1
depends_on:
- Check Promote
when:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
- name: Build and Push Engine Docker Image Backend to Dockerhub
- name: build and push docker image
image: plugins/docker
settings:
repo: grafana/oncall
tags: ${DRONE_TAG}-amd64-linux
dockerfile: engine/Dockerfile
context: engine/
password:
@ -247,21 +235,94 @@ steps:
username:
from_secret: docker_username
depends_on:
- Image Tag
- name: Unrecognized Promote Target
image: alpine
commands:
- echo $DRONE_DEPLOY_TO is not a recognized promote target!
- exit 1
when:
target:
exclude:
- oss
- set engine version
trigger:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
---
kind: pipeline
type: docker
name: OSS engine release (arm64)
platform:
os: linux
arch: arm64
steps:
- name: set engine version
image: alpine
commands:
- apk add --no-cache bash sed
- if [ -z "$DRONE_TAG" ]; then echo "No tag, not modifying version"; else sed "0,/VERSION.*/ s/VERSION.*/VERSION = \"${DRONE_TAG}\"/g" engine/settings/base.py > engine/settings/base.temp && mv engine/settings/base.temp engine/settings/base.py; fi
- cat engine/settings/base.py | grep VERSION | head -1
- name: build and push docker image
image: plugins/docker
settings:
repo: grafana/oncall
tags: ${DRONE_TAG}-arm64-linux
dockerfile: engine/Dockerfile
context: engine/
password:
from_secret: docker_password
username:
from_secret: docker_username
depends_on:
- set engine version
trigger:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
---
depends_on:
- OSS engine release (amd64)
- OSS engine release (arm64)
kind: pipeline
type: docker
name: manifest
steps:
- name: manifest tag
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
target: "grafana/oncall:${DRONE_TAG}"
template: "grafana/oncall:${DRONE_TAG}-ARCH-OS"
platforms:
- linux/amd64
- linux/arm64
- name: manifest latest
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
target: "grafana/oncall:latest"
template: "grafana/oncall:${DRONE_TAG}-ARCH-OS"
platforms:
- linux/amd64
- linux/arm64
trigger:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
---
# Secret for pulling docker images.
@ -334,6 +395,6 @@ kind: secret
name: drone_token
---
kind: signature
hmac: a74dd831a3d0a87b8fc1db45699a6a834ea769da9f437c55979ae665948c3b3f
hmac: 8a060649c132677ba1b5693b5ac6c846c02f9a5bb645fe990b26a7ea42a0fb66
...

View file

@ -15,7 +15,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 14
node-version: 14.17.0
- name: Build
run: |
pip install $(grep "pre-commit" engine/requirements.txt)

View file

@ -16,7 +16,7 @@ jobs:
python-version: '3.9'
- uses: actions/setup-node@v3
with:
node-version: 14
node-version: 14.17.0
- uses: snyk/actions/setup@master
- name: Install Dependencies
run: |

View file

@ -1,5 +1,28 @@
# Change Log
## v1.0.32 (2022-09-01)
- Bug fixes
## v1.0.31 (2022-09-01)
- Bump celery version
- Fix oss to cloud connection
## v1.0.30 (2022-08-31)
- Bug fix: check user notification policy before access
## v1.0.29 (2022-08-31)
- Add arm64 docker image
## v1.0.28 (2022-08-31)
- Bug fixes
## v1.0.27 (2022-08-30)
- Bug fixes
## v1.0.26 (2022-08-26)
- Insight log's format fixes
- Remove UserNotificationPolicy auto-recreating
## v1.0.25 (2022-08-24)
- Bug fixes
@ -69,7 +92,7 @@
## 1.0.2 (2022-06-17)
- Fix Grafana Alerting integration to handle API changes in Grafana 9
- Improve public api endpoint for for outgoing webhooks (/actions) by adding ability to create, update and delete outgoing webhook instance
- Improve public api endpoint for outgoing webhooks (/actions) by adding ability to create, update and delete outgoing webhook instance
## 1.0.0 (2022-06-14)

View file

@ -1,7 +1,9 @@
* [Developer quickstart](#developer-quickstart)
* [Code style](#code-style)
* [Backend setup](#backend-setup)
* [Frontend setup](#frontend-setup)
* [Slack application setup](#slack-application-setup)
* [Update drone build](#update-drone-build)
* [Troubleshooting](#troubleshooting)
* [ld: library not found for -lssl](#ld-library-not-found-for--lssl)
* [Could not build wheels for cryptography which use PEP 517 and cannot be installed directly](#could-not-build-wheels-for-cryptography-which-use-pep-517-and-cannot-be-installed-directly)
@ -131,6 +133,22 @@ extra_hosts:
For Slack app configuration check our docs: https://grafana.com/docs/grafana-cloud/oncall/open-source/#slack-setup
### Update drone build
The .drone.yml build file must be signed when changes are made to it. Follow these steps:
If you have not installed drone CLI follow [these instructions](https://docs.drone.io/cli/install/)
To sign the .drone.yml file:
```bash
export DRONE_SERVER=https://drone.grafana.net
# Get your drone token from https://drone.grafana.net/account
export DRONE_TOKEN=<Your DRONE_TOKEN>
drone sign --save grafana/oncall .drone.yml
```
## Troubleshooting
### ld: library not found for -lssl
@ -241,18 +259,3 @@ pytest -n4
5. Create a new Django Server run configuration to Run/Debug the engine
- Use a plugin such as EnvFile to load the .env file
- Change port from 8000 to 8080
## Update drone build
The .drone.yml build file must be signed when changes are made to it. Follow these steps:
If you have not installed drone CLI follow [these instructions](https://docs.drone.io/cli/install/)
To sign the .drone.yml file:
```bash
export DRONE_SERVER=https://drone.grafana.net
# Get your drone token from https://drone.grafana.net/account
export DRONE_TOKEN=<Your DRONE_TOKEN>
drone sign --save grafana/oncall .drone.yml
```

View file

@ -16,15 +16,22 @@ weight: 300
# Telegram integration for Grafana OnCall
You can use Telegram to deliver alert group notifications to a dedicated channel, and allow users to perform notification actions.
You can manage alerts either directly in your personal Telegram DMs or in a dedicated team channel.
Each alert group notification is assigned a dedicated discussion. Users can perform notification actions (acknowledge, resolve, silence), create reports, and discuss alerts in the comments section of the discussions.
## Configure Telegram user settings in Grafana OnCall
In case an integration route is not configured to use a Telegram channel, users will receive messages with alert group contents, logs and actions in their DMs.
To receive alert group contents, escalation logs and to be able to perform actions (acknowledge, resolve, silence) in Telegram DMs, please refer to the following steps:
## Connect to Telegram
1. In your profile, find the Telegram setting and click **Connect**.
1. Click **Connect automatically** for the bot to message you and to bring up your telegram account.
1. Click **Start** when the OnCall bot messages you and wait for the connection confirmation.
1. Done! Now you can receive alerts directly to your Telegram DMs.
Connect your organization's Telegram account to your Grafana OnCall instance by following the instructions provided in OnCall. You can use the following steps as a reference.
If you want to connect manually, you can click the URL provided and then **SEND MESSAGE**. In your Telegram account, click **Start**.
## (Optional) Connect to a Telegram channel
In case you want to manage alerts in a dedicated Telegram channel, please use the following steps as a reference.
> **NOTE:** Only Grafana users with the administrator role can configure OnCall settings.
@ -42,10 +49,5 @@ Connect your organization's Telegram account to your Grafana OnCall instance by
1. In OnCall, send the provided verification code to the channel.
1. Make sure users connect to Telegram in their OnCall user profile.
## Configure Telegram user settings in OnCall
1. In your profile, find the Telegram setting and click **Connect**.
1. Click **Connect automatically** for the bot to message you and to bring up your telegram account.
1. Click **Start** when the OnCall bot messages you.
If you want to connect manually, you can click the URL provided and then **SEND MESSAGE**. In your Telegram account, click **Start**.
Each alert group is assigned a dedicated discussion. Users can perform actions (acknowledge, resolve, silence), and discuss alerts in the comments section of the discussions.
In case an integration route is not configured to use a Telegram channel, users will receive messages with alert group contents, logs and actions in their DMs.

View file

@ -166,13 +166,11 @@ lt --port 8080 -s pretty-turkey-83 --print-requests
The Telegram integration for Grafana OnCall is designed for collaborative team work and improved incident response. Refer to the following steps to configure the Telegram integration:
1. Ensure your OnCall environment is up and running.
1. Request [BotFather](https://t.me/BotFather) for a key, then add your key in `TELEGRAM_TOKEN` in your Grafana OnCall **Env Variables**.
1. Set `TELEGRAM_WEBHOOK_HOST` with your external URL for your Grafana OnCall.
1. From the **ChatOps** tab in Grafana OnCall, click **Telegram**.
1. Ensure your Grafana OnCall environment is up and running.
2. Create a Telegram bot using [BotFather](https://t.me/BotFather) and save the token provided by BotFather. Please make sure to disable **Group Privacy** for the bot (Bot Settings -> Group Privacy -> Turn off).
3. Paste the token provided by BotFather to the `TELEGRAM_TOKEN` variable on the **Env Variables** page of your Grafana OnCall instance.
4. Set the `TELEGRAM_WEBHOOK_HOST` variable to the external address of your Grafana OnCall instance. Please note that `TELEGRAM_WEBHOOK_HOST` must start with `https://` and be publicly available (meaning that it can be reached by Telegram servers). If your host is private or local, consider using a reverse proxy (e.g. [ngrok](https://ngrok.com)).
5. Now you can connect Telegram accounts on the **Users** page and receive alert groups to Telegram direct messages. Alternatively, in case you want to connect Telegram channels to your Grafana OnCall environment, navigate to the **ChatOps** tab.
## Grafana OSS-Cloud Setup

View file

@ -659,9 +659,7 @@ class IncidentLogBuilder:
# last passed step order + 1
notification_policy_order = last_user_log.notification_policy.order + 1
notification_policies = UserNotificationPolicy.objects.get_or_create_for_user(
user=user_to_notify, important=important
)
notification_policies = UserNotificationPolicy.objects.filter(user=user_to_notify, important=important)
for notification_policy in notification_policies:
future_notification = notification_policy.order >= notification_policy_order

View file

@ -1,5 +1,4 @@
from .acknowledge_reminder import acknowledge_reminder_task # noqa: F401
from .cache_alert_group_for_web import cache_alert_group_for_web, schedule_cache_for_alert_group # noqa: F401
from .calculcate_escalation_finish_time import calculate_escalation_finish_time # noqa
from .call_ack_url import call_ack_url # noqa: F401
from .check_escalation_finished import check_escalation_finished_task # noqa: F401
@ -9,7 +8,6 @@ from .custom_button_result import custom_button_result # noqa: F401
from .delete_alert_group import delete_alert_group # noqa: F401
from .distribute_alert import distribute_alert # noqa: F401
from .escalate_alert_group import escalate_alert_group # noqa: F401
from .invalidate_web_cache_for_alert_group import invalidate_web_cache_for_alert_group # noqa: F401, todo: remove
from .invite_user_to_join_incident import invite_user_to_join_incident # noqa: F401
from .maintenance import disable_maintenance # noqa: F401
from .notify_all import notify_all_task # noqa: F401
@ -17,7 +15,6 @@ from .notify_group import notify_group_task # noqa: F401
from .notify_ical_schedule_shift import notify_ical_schedule_shift # noqa: F401
from .notify_user import notify_user_task # noqa: F401
from .resolve_alert_group_by_source_if_needed import resolve_alert_group_by_source_if_needed # noqa: F401
from .resolve_alert_group_if_needed import resolve_alert_group_if_needed # noqa: F401
from .resolve_by_last_step import resolve_by_last_step_task # noqa: F401
from .send_alert_group_signal import send_alert_group_signal # noqa: F401
from .send_update_log_report_signal import send_update_log_report_signal # noqa: F401

View file

@ -1,19 +0,0 @@
from django.conf import settings
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
)
def schedule_cache_for_alert_group(alert_group_pk):
# todo: remove
pass
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=0 if settings.DEBUG else None
)
def cache_alert_group_for_web(alert_group_pk):
# todo: remove
pass

View file

@ -48,11 +48,9 @@ def custom_button_result(custom_button_pk, alert_group_pk, user_pk=None, escalat
except TemplateError:
is_request_successful = False
result_message = "Template error"
except json.JSONDecodeError as e:
task_logger.error(
f"Failed to send build_post_kwargs for alert_group {alert_group_pk}, " f"custom_button {custom_button_pk}"
)
raise e
except json.JSONDecodeError:
is_request_successful = False
result_message = "JSON decoding error"
else:
is_request_successful, result_message = request_outgoing_webhook(
custom_button.webhook, "POST", post_kwargs=post_kwargs

View file

@ -1,11 +0,0 @@
from django.conf import settings
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
)
def invalidate_web_cache_for_alert_group(org_pk=None, channel_pk=None, alert_group_pk=None, alert_group_pks=None):
# todo: remove
pass

View file

@ -58,16 +58,20 @@ def notify_group_task(alert_group_pk, escalation_policy_snapshot_order=None):
if not user.is_notification_allowed:
continue
notification_policies = UserNotificationPolicy.objects.get_or_create_for_user(
notification_policies = UserNotificationPolicy.objects.filter(
user=user,
important=escalation_policy_step == EscalationPolicy.STEP_NOTIFY_GROUP_IMPORTANT,
)
usergroup_notification_plan += "\n_{} (".format(
step.get_user_notification_message_for_thread_for_usergroup(user, notification_policies.first())
)
if notification_policies:
usergroup_notification_plan += "\n_{} (".format(
step.get_user_notification_message_for_thread_for_usergroup(user, notification_policies.first())
)
notification_channels = []
if notification_policies.filter(step=UserNotificationPolicy.Step.NOTIFY).count() == 0:
usergroup_notification_plan += "Empty notifications"
for notification_policy in notification_policies:
if notification_policy.step == UserNotificationPolicy.Step.NOTIFY:
notification_channels.append(

View file

@ -73,9 +73,12 @@ def notify_user_task(
user_has_notification = UserHasNotification.objects.filter(pk=user_has_notification.pk).select_for_update()[0]
if previous_notification_policy_pk is None:
notification_policy = UserNotificationPolicy.objects.get_or_create_for_user(
user=user, important=important
).first()
notification_policy = UserNotificationPolicy.objects.filter(user=user, important=important).first()
if notification_policy is None:
task_logger.info(
f"notify_user_task: Failed to notify. No notification policies. user_id={user_pk} alert_group_id={alert_group_pk} important={important}"
)
return
# Here we collect a brief overview of notification steps configured for user to send it to thread.
collected_steps_ids = []
next_notification_policy = notification_policy.next()

View file

@ -1,31 +0,0 @@
# TODO: remove this file when all the resolve_alert_group_if_needed are processed
# New version - apps.alerts.tasks.resolve_alert_group_by_source_if_needed.resolve_alert_group_by_source_if_needed
from django.apps import apps
from django.conf import settings
from common.custom_celery_tasks import shared_dedicated_queue_retry_task
@shared_dedicated_queue_retry_task(
autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if settings.DEBUG else None
)
def resolve_alert_group_if_needed(alert_id):
"""
The purpose of this task is to avoid computation-heavy check after each alert.
Should be delayed and invoked only for the last one.
"""
AlertGroupForAlertManager = apps.get_model("alerts", "AlertGroupForAlertManager")
AlertForAlertManager = apps.get_model("alerts", "AlertForAlertManager")
alert = AlertForAlertManager.objects.get(pk=alert_id)
if not resolve_alert_group_if_needed.request.id == alert.group.active_resolve_calculation_id:
return "Resolve calculation celery ID mismatch. Duplication or non-active. Active: {}".format(
alert.group.active_resolve_calculation_id
)
else:
# Retrieving group again to have an access to child class methods
alert_group = AlertGroupForAlertManager.all_objects.get(pk=alert.group_id)
if alert_group.is_alert_a_resolve_signal(alert):
alert_group.resolve_by_source()
return f"resolved alert_group {alert_group.pk}"

View file

@ -3,7 +3,6 @@ from datetime import timedelta
import humanize
import pytz
from django.apps import apps
from django.conf import settings
from django.utils import timezone
from rest_framework import fields, serializers
@ -110,25 +109,7 @@ class CurrentOrganizationSerializer(OrganizationSerializer):
def get_limits(self, obj):
user = self.context["request"].user
if not settings.OSS_INSTALLATION:
return obj.notifications_limit_web_report(user)
# show a version warning on OSS installations in case backend and frontend are different versions
frontend_version = self.context["request"].headers.get("X-OnCall-Plugin-Version")
backend_version = settings.VERSION
version_warning = {}
if backend_version and frontend_version and backend_version != frontend_version:
text = (
"Version mismatch! Please make sure you have the same versions of the Grafana OnCall plugin "
"and Grafana OnCall engine, "
"otherwise there could be issues with your Grafana OnCall installation! "
f"Current plugin version: {frontend_version}, current engine version: {backend_version}. "
"Please see the update instructions: "
"https://grafana.com/docs/oncall/latest/open-source/#update-grafana-oncall-oss"
)
version_warning = {"period_title": "Version mismatch", "show_limits_warning": True, "warning_text": text}
return version_warning or obj.notifications_limit_web_report(user)
return obj.notifications_limit_web_report(user)
def get_env_status(self, obj):
LiveSetting.populate_settings_if_needed()

View file

@ -800,6 +800,30 @@ def test_admin_can_unlink_another_user_backend_account(
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_admin_can_unlink_another_user_slack_account(
make_organization_with_slack_team_identity,
make_user_for_organization,
make_user_with_slack_user_identity,
make_token_for_organization,
make_user_auth_headers,
):
organization, slack_team_identity = make_organization_with_slack_team_identity()
admin = make_user_for_organization(organization, role=Role.ADMIN)
editor, slack_user_identity_1 = make_user_with_slack_user_identity(
slack_team_identity, organization, slack_id="user_1", role=Role.EDITOR
)
_, token = make_token_for_organization(organization)
client = APIClient()
url = reverse("api-internal:user-unlink-slack", kwargs={"pk": editor.public_primary_key})
response = client.post(url, format="json", **make_user_auth_headers(admin, token))
assert response.status_code == status.HTTP_200_OK
editor.refresh_from_db()
assert editor.slack_user_identity is None
"""Test user permissions"""
@ -1038,6 +1062,28 @@ def test_user_cant_get_another_user_backend_verification_code(
assert response.status_code == status.HTTP_403_FORBIDDEN
@pytest.mark.django_db
def test_user_can_unlink_own_slack_account(
make_organization_with_slack_team_identity,
make_user_with_slack_user_identity,
make_token_for_organization,
make_user_auth_headers,
):
organization, slack_team_identity = make_organization_with_slack_team_identity()
user, slack_user_identity_1 = make_user_with_slack_user_identity(
slack_team_identity, organization, slack_id="user_1", role=Role.EDITOR
)
_, token = make_token_for_organization(organization)
client = APIClient()
url = reverse("api-internal:user-unlink-slack", kwargs={"pk": user.public_primary_key})
response = client.post(url, format="json", **make_user_auth_headers(user, token))
assert response.status_code == status.HTTP_200_OK
user.refresh_from_db()
assert user.slack_user_identity is None
@pytest.mark.django_db
def test_user_can_unlink_backend_own_account(
make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers
@ -1086,6 +1132,31 @@ def test_user_unlink_backend_backend_account_not_found(
assert response.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_user_cant_unlink_slack_another_user(
make_organization_with_slack_team_identity,
make_user_with_slack_user_identity,
make_token_for_organization,
make_user_auth_headers,
):
organization, slack_team_identity = make_organization_with_slack_team_identity()
first_user, slack_user_identity_1 = make_user_with_slack_user_identity(
slack_team_identity, organization, slack_id="user_1", role=Role.EDITOR
)
second_user, slack_user_identity_2 = make_user_with_slack_user_identity(
slack_team_identity, organization, slack_id="user_2", role=Role.EDITOR
)
_, token = make_token_for_organization(organization)
client = APIClient()
url = reverse("api-internal:user-unlink-slack", kwargs={"pk": first_user.public_primary_key})
response = client.post(url, format="json", **make_user_auth_headers(second_user, token))
assert response.status_code == status.HTTP_403_FORBIDDEN
first_user.refresh_from_db()
assert first_user.slack_user_identity is not None
@pytest.mark.django_db
def test_user_cant_unlink_backend__another_user(
make_organization, make_user_for_organization, make_token_for_organization, make_user_auth_headers

View file

@ -31,7 +31,7 @@ class PublicApiTokenView(
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
write_resource_insight_log(instance=instance, author=instance.author, event=EntityEvent.DELETED)
write_resource_insight_log(instance=instance, author=request.user, event=EntityEvent.DELETED)
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)

View file

@ -127,6 +127,7 @@ class UserView(
"get_verification_code",
"get_backend_verification_code",
"get_telegram_verification_code",
"unlink_slack",
"unlink_telegram",
"unlink_backend",
"make_test_call",
@ -146,6 +147,7 @@ class UserView(
"get_verification_code",
"get_backend_verification_code",
"get_telegram_verification_code",
"unlink_slack",
"unlink_telegram",
"unlink_backend",
"make_test_call",
@ -350,6 +352,20 @@ class UserView(
return Response({"telegram_code": str(new_code.uuid), "bot_link": bot_link}, status=status.HTTP_200_OK)
@action(detail=True, methods=["post"])
def unlink_slack(self, request, pk):
user = self.get_object()
user.slack_user_identity = None
user.save(update_fields=["slack_user_identity"])
write_chatops_insight_log(
author=request.user,
event_name=ChatOpsEvent.USER_UNLINKED,
chatops_type=ChatOpsType.SLACK,
linked_user=user.username,
linked_user_id=user.public_primary_key,
)
return Response(status=status.HTTP_200_OK)
@action(detail=True, methods=["post"])
def unlink_telegram(self, request, pk):
user = self.get_object()

View file

@ -26,6 +26,7 @@ from apps.base.models.user_notification_policy import BUILT_IN_BACKENDS, Notific
from apps.user_management.models import User
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import UpdateSerializerMixin
from common.exceptions import UserNotificationPolicyCouldNotBeDeleted
from common.insight_log import EntityEvent, write_resource_insight_log
@ -55,14 +56,14 @@ class UserNotificationPolicyView(UpdateSerializerMixin, ModelViewSet):
except ValueError:
raise BadRequest(detail="Invalid user param")
if user_id is None or user_id == self.request.user.public_primary_key:
queryset = self.model.objects.get_or_create_for_user(user=self.request.user, important=important)
queryset = self.model.objects.filter(user=self.request.user, important=important)
else:
try:
target_user = User.objects.get(public_primary_key=user_id)
except User.DoesNotExist:
raise BadRequest(detail="User does not exist")
queryset = self.model.objects.get_or_create_for_user(user=target_user, important=important)
queryset = self.model.objects.filter(user=target_user, important=important)
queryset = self.serializer_class.setup_eager_loading(queryset)
@ -111,7 +112,10 @@ class UserNotificationPolicyView(UpdateSerializerMixin, ModelViewSet):
def perform_destroy(self, instance):
user = instance.user
prev_state = user.insight_logs_serialized
instance.delete()
try:
instance.delete()
except UserNotificationPolicyCouldNotBeDeleted:
raise BadRequest(detail="Can't delete last user notification policy")
new_state = user.insight_logs_serialized
write_resource_insight_log(
instance=user,

View file

@ -1,32 +1,8 @@
from django.db import IntegrityError, models
from django.db import models
from django.db.models import JSONField
class DynamicSettingsManager(models.Manager):
def get_or_create(self, defaults=None, **kwargs):
"""
Using get_or_create inside celery task sometimes triggers making two identical DynamicSettings.
E.g. https://gitlab.amixr.io/amixr/amixr/issues/843
More info: https://stackoverflow.com/questions/17960593/multipleobjectsreturned-with-get-or-create
Solution is to create UniqueConstraint on DynamicSetting.Name and catch IntegrityError.
Django 3 has built-in check https://github.com/django/django/blob/master/django/db/models/query.py#L571
As for now we are using Django 2.2 which has not.
# TODO: remove this method when we will move to Django 3
So it is overridden get_or_create to catch IntegrityError and just return object in this case.
"""
try:
return super(DynamicSettingsManager, self).get_or_create(defaults=defaults, **kwargs)
except IntegrityError:
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
pass
raise
class DynamicSetting(models.Model):
objects = DynamicSettingsManager()
name = models.CharField(max_length=100)
boolean_value = models.BooleanField(null=True, default=None)
numeric_value = models.IntegerField(null=True, default=None)

View file

@ -4,13 +4,14 @@ from typing import Tuple
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator
from django.db import models, transaction
from django.db import models
from django.db.models import Q, QuerySet
from django.utils import timezone
from ordered_model.models import OrderedModel
from apps.base.messaging import get_messaging_backends
from apps.user_management.models import User
from common.exceptions import UserNotificationPolicyCouldNotBeDeleted
from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length
@ -69,24 +70,6 @@ def validate_channel_choice(value):
class UserNotificationPolicyQuerySet(models.QuerySet):
def get_or_create_for_user(self, user: User, important: bool) -> "QuerySet[UserNotificationPolicy]":
with transaction.atomic():
User.objects.select_for_update().get(pk=user.pk)
return self._get_or_create_for_user(user, important)
def _get_or_create_for_user(self, user: User, important: bool) -> "QuerySet[UserNotificationPolicy]":
notification_policies = super().filter(user=user, important=important)
if notification_policies.exists():
return notification_policies
if important:
policies = self.create_important_policies_for_user(user)
else:
policies = self.create_default_policies_for_user(user)
return policies
def create_default_policies_for_user(self, user: User) -> "QuerySet[UserNotificationPolicy]":
model = self.model
@ -197,6 +180,12 @@ class UserNotificationPolicy(OrderedModel):
else:
return "Not set"
def delete(self):
if UserNotificationPolicy.objects.filter(important=self.important, user=self.user).count() == 1:
raise UserNotificationPolicyCouldNotBeDeleted("Can't delete last user notification policy")
else:
super().delete()
class NotificationChannelOptions:
"""

View file

@ -9,6 +9,7 @@ from apps.base.models.user_notification_policy import (
validate_channel_choice,
)
from apps.base.tests.messaging_backend import TestOnlyBackend
from common.exceptions import UserNotificationPolicyCouldNotBeDeleted
@pytest.mark.parametrize(
@ -80,3 +81,25 @@ def test_extra_messaging_backends_details():
)
assert validate_channel_choice(channel_choice) is None
@pytest.mark.django_db
def test_unable_to_delete_last_notification_policy(
make_organization,
make_user_for_organization,
make_user_notification_policy,
):
organization = make_organization()
user = make_user_for_organization(organization)
first_policy = make_user_notification_policy(
user, UserNotificationPolicy.Step.NOTIFY, notify_by=UserNotificationPolicy.NotificationChannel.SLACK
)
second_policy = make_user_notification_policy(
user, UserNotificationPolicy.Step.WAIT, wait_delay=timedelta(minutes=5)
)
first_policy.delete()
with pytest.raises(UserNotificationPolicyCouldNotBeDeleted):
second_policy.delete()

View file

@ -12,6 +12,7 @@ from apps.user_management.models import User
from common.api_helpers.exceptions import BadRequest
from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
from common.exceptions import UserNotificationPolicyCouldNotBeDeleted
from common.insight_log import EntityEvent, write_resource_insight_log
@ -74,7 +75,10 @@ class PersonalNotificationView(RateLimitHeadersMixin, UpdateSerializerMixin, Mod
def perform_destroy(self, instance):
user = self.request.user
prev_state = user.insight_logs_serialized
instance.delete()
try:
instance.delete()
except UserNotificationPolicyCouldNotBeDeleted:
raise BadRequest(detail="Can't delete last user notification policy")
new_state = user.insight_logs_serialized
write_resource_insight_log(
instance=user,

View file

@ -0,0 +1,14 @@
import re
ICAL_DATETIME_START = "DTSTART"
ICAL_DATETIME_END = "DTEND"
ICAL_DATETIME_STAMP = "DTSTAMP"
ICAL_SUMMARY = "SUMMARY"
ICAL_DESCRIPTION = "DESCRIPTION"
ICAL_ATTENDEE = "ATTENDEE"
ICAL_UID = "UID"
ICAL_RRULE = "RRULE"
ICAL_UNTIL = "UNTIL"
RE_PRIORITY = re.compile(r"^\[L(\d)\]")
RE_EVENT_UID_V1 = re.compile(r"amixr-([\w\d-]+)-U(\d+)-E(\d+)-S(\d+)")
RE_EVENT_UID_V2 = re.compile(r"oncall-([\w\d-]+)-PK([\w\d]+)-U(\d+)-E(\d+)-S(\d+)")

View file

@ -1,11 +1,22 @@
from collections import defaultdict
from datetime import datetime
from typing import List
from typing import List, Tuple
from django.apps import apps
from django.utils import timezone
from icalendar import Calendar, Event
from recurring_ical_events import UnfoldableCalendar, compare_greater, is_event, time_span_contains_event
from apps.schedules.constants import (
ICAL_DATETIME_END,
ICAL_DATETIME_STAMP,
ICAL_DATETIME_START,
ICAL_RRULE,
ICAL_UID,
ICAL_UNTIL,
RE_EVENT_UID_V1,
RE_EVENT_UID_V2,
)
from apps.schedules.ical_events.proxy.ical_proxy import IcalService
EXTRA_LOOKUP_DAYS = 16
@ -19,6 +30,17 @@ class AmixrUnfoldableCalendar(UnfoldableCalendar):
So i took part of code from 0.1.20b0 but leave 0.1.16b in requirements.
"""
class RepeatedEvent(UnfoldableCalendar.RepeatedEvent):
class Repetition(UnfoldableCalendar.RepeatedEvent.Repetition):
"""
A repetition of an event. Overridden version of
recurring_ical_events.UnfoldableCalendar.RepeatedEvent.Repetition. This is overridden to remove the 'RRULE'
param from ATTRIBUTES_TO_DELETE_ON_COPY, because the 'UNTIL' param must be stored in repetition events to
calculate its end date.
"""
ATTRIBUTES_TO_DELETE_ON_COPY = ["RDATE", "EXDATE"]
def between(self, start, stop):
"""Return events at a time between start (inclusive) and end (inclusive)"""
span_start = self.to_datetime(start)
@ -83,6 +105,29 @@ class AmixrRecurringIcalEventsAdapter(IcalService):
)
def filter_extra_days(event):
return time_span_contains_event(start_date, end_date, event["DTSTART"].dt, event["DTEND"].dt)
event_start, event_end = self.get_start_and_end_with_respect_to_event_type(event)
return time_span_contains_event(start_date, end_date, event_start, event_end)
return list(filter(filter_extra_days, events))
def get_start_and_end_with_respect_to_event_type(self, event: Event) -> Tuple[timezone.datetime, timezone.datetime]:
"""
Calculate start and end datetime
"""
CustomOnCallShift = apps.get_model("schedules", "CustomOnCallShift")
start = event[ICAL_DATETIME_START].dt
end = event[ICAL_DATETIME_END].dt
match = RE_EVENT_UID_V2.match(event[ICAL_UID]) or RE_EVENT_UID_V1.match(event[ICAL_UID])
# use different calculation rule for events from custom shifts generated at web
if match and int(match.groups()[-1]) == CustomOnCallShift.SOURCE_WEB:
rotation_start = event[ICAL_DATETIME_STAMP].dt
until_rrule = event.get(ICAL_RRULE, {}).get(ICAL_UNTIL)
if until_rrule:
until = until_rrule[0]
end = min(end, until)
start = max(start, rotation_start)
return start, end

View file

@ -1,7 +1,8 @@
from abc import ABC, abstractmethod
from datetime import datetime
from typing import List
from typing import List, Tuple
from django.utils import timezone
from icalendar import Calendar, Event
@ -10,6 +11,10 @@ class IcalService(ABC):
def get_events_from_ical_between(self, calendar: Calendar, start_date: datetime, end_date: datetime) -> List[Event]:
raise NotImplementedError
@abstractmethod
def get_start_and_end_with_respect_to_event_type(self, event: Event) -> Tuple[timezone.datetime, timezone.datetime]:
raise NotImplementedError
class IcalProxy(IcalService):
def __init__(self, ical_adapter: IcalService):
@ -17,3 +22,6 @@ class IcalProxy(IcalService):
def get_events_from_ical_between(self, calendar: Calendar, start_date: datetime, end_date: datetime) -> List[Event]:
return self.ical_adapter.get_events_from_ical_between(calendar, start_date, end_date)
def get_start_and_end_with_respect_to_event_type(self, event: Event) -> Tuple[timezone.datetime, timezone.datetime]:
return self.ical_adapter.get_start_and_end_with_respect_to_event_type(event)

View file

@ -13,6 +13,17 @@ from django.db.models import Q
from django.utils import timezone
from icalendar import Calendar
from apps.schedules.constants import (
ICAL_ATTENDEE,
ICAL_DATETIME_END,
ICAL_DATETIME_START,
ICAL_DESCRIPTION,
ICAL_SUMMARY,
ICAL_UID,
RE_EVENT_UID_V1,
RE_EVENT_UID_V2,
RE_PRIORITY,
)
from apps.schedules.ical_events import ical_events
from common.constants.role import Role
from common.utils import timed_lru_cache
@ -68,15 +79,6 @@ def memoized_users_in_ical(usernames_from_ical, organization):
return users_in_ical(usernames_from_ical, organization)
ICAL_DATETIME_START = "DTSTART"
ICAL_DATETIME_END = "DTEND"
ICAL_SUMMARY = "SUMMARY"
ICAL_DESCRIPTION = "DESCRIPTION"
ICAL_ATTENDEE = "ATTENDEE"
ICAL_UID = "UID"
RE_PRIORITY = re.compile(r"^\[L(\d)\]")
RE_EVENT_UID_V1 = re.compile(r"amixr-([\w\d-]+)-U(\d+)-E(\d+)-S(\d+)")
RE_EVENT_UID_V2 = re.compile(r"oncall-([\w\d-]+)-PK([\w\d]+)-U(\d+)-E(\d+)-S(\d+)")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@ -187,13 +189,11 @@ def get_shifts_dict(calendar, calendar_type, schedule, datetime_start, datetime_
}
)
else:
start = event[ICAL_DATETIME_START].dt.astimezone(pytz.UTC)
end = event[ICAL_DATETIME_END].dt.astimezone(pytz.UTC)
start, end = ical_events.get_start_and_end_with_respect_to_event_type(event)
result_datetime.append(
{
"start": start,
"end": end,
"start": start.astimezone(pytz.UTC),
"end": end.astimezone(pytz.UTC),
"users": users,
"missing_users": missing_users,
"priority": priority,

View file

@ -280,7 +280,7 @@ class CustomOnCallShift(models.Model):
# rolling_users shift converts to several ical events
if self.type in (CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, CustomOnCallShift.TYPE_OVERRIDE):
# generate initial iCal for counting rotation start date
event_ical = self.generate_ical(self.start, user_counter=0)
event_ical = self.generate_ical(self.start)
rotations_created = 0
all_rotation_checked = False
@ -301,13 +301,16 @@ class CustomOnCallShift(models.Model):
if not start: # means that rotation ends before next event starts
all_rotation_checked = True
break
elif start >= self.rotation_start: # event has already started, generate iCal for each user
elif (
self.source == CustomOnCallShift.SOURCE_WEB and start + self.duration > self.rotation_start
) or start >= self.rotation_start:
# event has already started, generate iCal for each user
for user_counter, user in enumerate(users, start=1):
event_ical = self.generate_ical(start, user_counter, user, counter, time_zone)
result += event_ical
rotations_created += 1
else: # generate default iCal to calculate the date for the next rotation
event_ical = self.generate_ical(start, user_counter=0)
event_ical = self.generate_ical(start)
if rotations_created == len(users_queue): # means that we generated iCal for every user group
all_rotation_checked = True
@ -319,14 +322,14 @@ class CustomOnCallShift(models.Model):
result += self.generate_ical(self.start, user_counter, user, time_zone=time_zone)
return result
def generate_ical(self, start, user_counter, user=None, counter=1, time_zone="UTC"):
def generate_ical(self, start, user_counter=0, user=None, counter=1, time_zone="UTC"):
event = Event()
event["uid"] = f"oncall-{self.uuid}-PK{self.public_primary_key}-U{user_counter}-E{counter}-S{self.source}"
if user:
event.add("summary", self.get_summary_with_user_for_ical(user))
event.add("dtstart", self.convert_dt_to_schedule_timezone(start, time_zone))
event.add("dtend", self.convert_dt_to_schedule_timezone(start + self.duration, time_zone))
event.add("dtstamp", timezone.now())
event.add("dtstamp", self.rotation_start)
if self.event_ical_rules:
event.add("rrule", self.event_ical_rules)
try:
@ -381,6 +384,23 @@ class CustomOnCallShift(models.Model):
days_for_next_event += next_month_days
next_event_start = current_event_start + timezone.timedelta(days=days_for_next_event)
end_date = None
# get the period for calculating the current rotation end date for long events with frequency weekly and monthly
if self.frequency == CustomOnCallShift.FREQUENCY_WEEKLY:
DAYS_IN_A_WEEK = 7
days_diff = 0
# get the last day of the week with respect to the week_start
if next_event_start.weekday() != self.week_start:
days_diff = DAYS_IN_A_WEEK + next_event_start.weekday() - self.week_start
days_diff %= DAYS_IN_A_WEEK
end_date = next_event_start + timezone.timedelta(days=DAYS_IN_A_WEEK - days_diff - ONE_DAY)
elif self.frequency == CustomOnCallShift.FREQUENCY_MONTHLY:
# get the last day of the month
current_day_number = next_event_start.day
number_of_days = monthrange(next_event_start.year, next_event_start.month)[1]
days_diff = number_of_days - current_day_number
end_date = next_event_start + timezone.timedelta(days=days_diff)
next_event = None
# repetitions generate the next event shift according with the recurrence rules
repetitions = UnfoldableCalendar(current_event).RepeatedEvent(
@ -388,12 +408,23 @@ class CustomOnCallShift(models.Model):
)
ical_iter = repetitions.__iter__()
for event in ical_iter:
if event.start >= next_event_start:
next_event = event
break
next_event_dt = next_event.start if next_event is not None else None
if end_date: # end_date exists for long events with frequency weekly and monthly
if end_date >= event.start >= next_event_start:
if (
self.source == CustomOnCallShift.SOURCE_WEB and event.stop > self.rotation_start
) or event.start >= self.rotation_start:
next_event = event
break
elif end_date < event.start:
break
else:
if event.start >= next_event_start:
next_event = event
break
if self.until and next_event_dt and next_event_dt > self.until:
next_event_dt = next_event.start if next_event is not None else next_event_start
if self.until and next_event_dt > self.until:
return
return next_event_dt

View file

@ -44,17 +44,22 @@ def refresh_ical_file(schedule_pk):
if schedule.cached_ical_file_primary is not None:
if schedule.prev_ical_file_primary is None:
run_task_primary = True
task_logger.info(f"run_task_primary {schedule_pk} {run_task_primary} prev_ical_file_primary is None")
else:
run_task_primary = not is_icals_equal(schedule.cached_ical_file_primary, schedule.prev_ical_file_primary)
task_logger.info(f"run_task_primary {schedule_pk} {run_task_primary} icals not equal")
run_task_overrides = False
if schedule.cached_ical_file_overrides is not None:
if schedule.prev_ical_file_overrides is None:
run_task_overrides = True
task_logger.info(f"run_task_overrides {schedule_pk} {run_task_primary} prev_ical_file_overrides is None")
else:
run_task_overrides = not is_icals_equal(
schedule.cached_ical_file_overrides, schedule.prev_ical_file_overrides
)
task_logger.info(f"run_task_overrides {schedule_pk} {run_task_primary} icals not equal")
run_task = run_task_primary or run_task_overrides
if run_task:
notify_about_empty_shifts_in_schedule.apply_async((schedule_pk,))
notify_about_gaps_in_schedule.apply_async((schedule_pk,))

View file

@ -477,7 +477,7 @@ def test_rolling_users_with_diff_start_and_rotation_start_daily(
"duration": timezone.timedelta(seconds=1800),
"frequency": CustomOnCallShift.FREQUENCY_DAILY,
"schedule": schedule,
"until": now + timezone.timedelta(days=6, minutes=1),
"until": now + timezone.timedelta(days=6, minutes=10),
}
rolling_users = [[user_1], [user_2], [user_3]]
on_call_shift = make_on_call_shift(
@ -535,7 +535,7 @@ def test_rolling_users_with_diff_start_and_rotation_start_weekly(
"duration": timezone.timedelta(seconds=1800),
"frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
"schedule": schedule,
"until": now + timezone.timedelta(days=42, minutes=1),
"until": now + timezone.timedelta(days=42, minutes=10),
}
rolling_users = [[user_1], [user_2], [user_3]]
on_call_shift = make_on_call_shift(
@ -575,7 +575,7 @@ def test_rolling_users_with_diff_start_and_rotation_start_weekly(
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_weekly_by_day(
def test_rolling_users_with_diff_start_and_rotation_start_weekly_by_day_weekend(
make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
organization, user_1 = make_organization_and_user()
@ -585,7 +585,8 @@ def test_rolling_users_with_diff_start_and_rotation_start_weekly_by_day(
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
now = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
today_weekday = now.weekday()
next_week_monday = now + timezone.timedelta(days=(0 - today_weekday) % 7)
delta_days = (0 - today_weekday) % 7 + (7 if today_weekday == 0 else 0)
next_week_monday = now + timezone.timedelta(days=delta_days)
# SAT, SUN
weekdays = [5, 6]
by_day = [CustomOnCallShift.ICAL_WEEKDAY_MAP[day] for day in weekdays]
@ -639,6 +640,75 @@ def test_rolling_users_with_diff_start_and_rotation_start_weekly_by_day(
assert len(users_on_call) == 0
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_weekly_by_day(
make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
):
organization, user_1 = make_organization_and_user()
user_2 = make_user_for_organization(organization)
user_3 = make_user_for_organization(organization)
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
now = timezone.now().replace(microsecond=0)
today_weekday = now.weekday()
weekdays = [(today_weekday + 1) % 7, (today_weekday + 3) % 7]
by_day = [CustomOnCallShift.ICAL_WEEKDAY_MAP[day] for day in weekdays]
data = {
"priority_level": 1,
"start": now,
"week_start": today_weekday,
"rotation_start": now + timezone.timedelta(days=8, hours=1),
"duration": timezone.timedelta(seconds=1800),
"frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
"schedule": schedule,
"until": now + timezone.timedelta(days=23, minutes=1),
"by_day": by_day,
}
rolling_users = [[user_1], [user_2], [user_3]]
on_call_shift = make_on_call_shift(
organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
)
on_call_shift.add_rolling_users(rolling_users)
date = now + timezone.timedelta(minutes=5)
# week 1: weekdays[0] - no (+1 day from start) ; weekdays[1] - no (+3 days from start) user_1
# week 2: weekdays[0] - no (+8 days from start) ; weekdays[1] - yes (+10 days from start) user_2
# week 3: weekdays[0] - yes (+15 days from start) ; weekdays[1] - yes (+17 days from start) user_3
# week 4: weekdays[0] - yes (+22 days from start) ; weekdays[1] - no (+24 days from start) user_1
user_1_on_call_dates = [date + timezone.timedelta(days=22)]
user_2_on_call_dates = [date + timezone.timedelta(days=10)]
user_3_on_call_dates = [date + timezone.timedelta(days=15), date + timezone.timedelta(days=17)]
nobody_on_call_dates = [
date, # less than rotation start
date + timezone.timedelta(days=1), # less than rotation start
date + timezone.timedelta(days=3), # less than rotation start
date + timezone.timedelta(days=8), # less than rotation start
date + timezone.timedelta(days=9), # weekday value not in by_day
date + timezone.timedelta(days=24), # higher than until
]
for dt in user_1_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 1
assert user_1 in users_on_call
for dt in user_2_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 1
assert user_2 in users_on_call
for dt in user_3_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 1
assert user_3 in users_on_call
for dt in nobody_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 0
@pytest.mark.django_db
def test_rolling_users_with_diff_start_and_rotation_start_monthly(
make_organization_and_user, make_user_for_organization, make_on_call_shift, make_schedule
@ -763,6 +833,237 @@ def test_rolling_users_with_diff_start_and_rotation_start_monthly_by_monthday(
assert len(users_on_call) == 0
@pytest.mark.django_db
def test_get_oncall_users_with_respect_to_rotation_start_and_until_dates_hourly(
make_organization_and_user,
make_on_call_shift,
make_schedule,
):
"""Test calculation start and end event dates for one event with respect to rotation start and until"""
organization, user = make_organization_and_user()
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
now = timezone.now().replace(microsecond=0)
data = {
"priority_level": 1,
"start": now,
"rotation_start": now + timezone.timedelta(minutes=10),
"duration": timezone.timedelta(hours=1),
"frequency": CustomOnCallShift.FREQUENCY_HOURLY,
"schedule": schedule,
"until": now + timezone.timedelta(minutes=40),
"source": CustomOnCallShift.SOURCE_WEB,
}
rolling_users = [[user]]
on_call_shift = make_on_call_shift(
organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
)
on_call_shift.add_rolling_users(rolling_users)
date = now + timezone.timedelta(minutes=2)
user_on_call_dates = [date + timezone.timedelta(minutes=10), date + timezone.timedelta(minutes=35)]
nobody_on_call_dates = [
date, # less than rotation start
date + timezone.timedelta(minutes=5), # less than rotation start
date + timezone.timedelta(minutes=40), # higher than until
]
for dt in user_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 1
assert user in users_on_call
for dt in nobody_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 0
@pytest.mark.django_db
def test_get_oncall_users_with_respect_to_rotation_start_and_until_dates_daily(
make_organization_and_user,
make_on_call_shift,
make_schedule,
):
"""Test calculation start and end event dates for one event with respect to rotation start and until"""
organization, user = make_organization_and_user()
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
now = timezone.now().replace(microsecond=0)
data = {
"priority_level": 1,
"start": now,
"rotation_start": now + timezone.timedelta(hours=5),
"duration": timezone.timedelta(days=1),
"frequency": CustomOnCallShift.FREQUENCY_DAILY,
"schedule": schedule,
"until": now + timezone.timedelta(hours=15),
"source": CustomOnCallShift.SOURCE_WEB,
}
rolling_users = [[user]]
on_call_shift = make_on_call_shift(
organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
)
on_call_shift.add_rolling_users(rolling_users)
date = now + timezone.timedelta(minutes=5)
user_on_call_dates = [date + timezone.timedelta(hours=5), date + timezone.timedelta(hours=10)]
nobody_on_call_dates = [
date, # less than rotation start
date + timezone.timedelta(hours=4), # less than rotation start
date + timezone.timedelta(hours=15), # higher than until
]
for dt in user_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 1
assert user in users_on_call
for dt in nobody_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 0
@pytest.mark.django_db
def test_get_oncall_users_with_respect_to_rotation_start_and_until_dates_weekly(
make_organization_and_user,
make_on_call_shift,
make_schedule,
):
"""Test calculation start and end event dates for one event with respect to rotation start and until"""
organization, user = make_organization_and_user()
# simple weekly event
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
now = timezone.now().replace(microsecond=0)
data = {
"priority_level": 1,
"start": now,
"rotation_start": now + timezone.timedelta(days=1),
"duration": timezone.timedelta(days=7),
"frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
"schedule": schedule,
"until": now + timezone.timedelta(days=6),
"week_start": now.weekday(),
"source": CustomOnCallShift.SOURCE_WEB,
}
rolling_users = [[user]]
on_call_shift = make_on_call_shift(
organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
)
on_call_shift.add_rolling_users(rolling_users)
date = now + timezone.timedelta(minutes=5)
user_on_call_dates = [date + timezone.timedelta(days=1), date + timezone.timedelta(days=5)]
nobody_on_call_dates = [
date, # less than rotation start
date + timezone.timedelta(hours=23), # less than rotation start
date + timezone.timedelta(days=6), # higher than until
]
for dt in user_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 1
assert user in users_on_call
for dt in nobody_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 0
# weekly event with by_day
schedule_2 = make_schedule(organization, schedule_class=OnCallScheduleWeb)
today_weekday = now.weekday()
weekdays = [today_weekday, (today_weekday + 1) % 7, (today_weekday + 2) % 7, (today_weekday + 5) % 7]
by_day = [CustomOnCallShift.ICAL_WEEKDAY_MAP[day] for day in weekdays]
data = {
"priority_level": 1,
"start": now,
"rotation_start": now + timezone.timedelta(days=1),
"duration": timezone.timedelta(hours=12),
"frequency": CustomOnCallShift.FREQUENCY_WEEKLY,
"schedule": schedule_2,
"until": now + timezone.timedelta(days=4, hours=23),
"week_start": today_weekday,
"by_day": by_day,
"source": CustomOnCallShift.SOURCE_WEB,
}
on_call_shift_2 = make_on_call_shift(
organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
)
on_call_shift_2.add_rolling_users(rolling_users)
date = now + timezone.timedelta(minutes=5)
user_on_call_dates = [date + timezone.timedelta(days=1), date + timezone.timedelta(days=2)]
nobody_on_call_dates = [
date, # less than rotation start
date + timezone.timedelta(hours=23), # less than rotation start
date + timezone.timedelta(days=3), # out of by_day
date + timezone.timedelta(days=4), # out of by_day
date + timezone.timedelta(days=5), # higher than until
]
for dt in user_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule_2, dt)
assert len(users_on_call) == 1
assert user in users_on_call
for dt in nobody_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule_2, dt)
assert len(users_on_call) == 0
@pytest.mark.django_db
def test_get_oncall_users_with_respect_to_rotation_start_and_until_dates_monthly(
make_organization_and_user,
make_on_call_shift,
make_schedule,
):
"""Test calculation start and end event dates for one event with respect to rotation start and until"""
organization, user = make_organization_and_user()
schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
now = timezone.now().replace(microsecond=0)
data = {
"priority_level": 1,
"start": now,
"rotation_start": now + timezone.timedelta(days=5),
"duration": timezone.timedelta(days=30),
"frequency": CustomOnCallShift.FREQUENCY_MONTHLY,
"schedule": schedule,
"until": now + timezone.timedelta(days=15),
"source": CustomOnCallShift.SOURCE_WEB,
}
rolling_users = [[user]]
on_call_shift = make_on_call_shift(
organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
)
on_call_shift.add_rolling_users(rolling_users)
date = now + timezone.timedelta(minutes=5)
user_on_call_dates = [date + timezone.timedelta(days=5), date + timezone.timedelta(days=10)]
nobody_on_call_dates = [
date, # less than rotation start
date + timezone.timedelta(days=4), # less than rotation start
date + timezone.timedelta(days=15), # higher than until
]
for dt in user_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 1
assert user in users_on_call
for dt in nobody_on_call_dates:
users_on_call = list_users_to_notify_from_ical(schedule, dt)
assert len(users_on_call) == 0
@pytest.mark.django_db
def test_get_oncall_users_for_empty_schedule(
make_organization,

View file

@ -611,7 +611,7 @@ class CustomButtonProcessStep(
custom_button = log_record.custom_button
debug_message = ""
if not log_record.step_specific_info["is_request_successful"]:
with suppress(TemplateError):
with suppress(TemplateError, json.JSONDecodeError):
post_kwargs = custom_button.build_post_kwargs(log_record.alert_group.alerts.first())
curl_request = render_curl_command(log_record.custom_button.webhook, "POST", post_kwargs)
debug_message = f"```{curl_request}```"

View file

@ -24,7 +24,6 @@ class NotificationDeliveryStep(scenario_step.ScenarioStep):
self.post_message_to_channel(
f"Attempt to send an SMS to {user_verbal_with_mention} has been failed due to a plan limit",
alert_group.slack_message.channel_id,
color="red",
)
elif (
log_record.notification_error_code
@ -33,7 +32,6 @@ class NotificationDeliveryStep(scenario_step.ScenarioStep):
self.post_message_to_channel(
f"Attempt to call to {user_verbal_with_mention} has been failed due to a plan limit",
alert_group.slack_message.channel_id,
color="red",
)
elif (
log_record.notification_error_code
@ -42,7 +40,6 @@ class NotificationDeliveryStep(scenario_step.ScenarioStep):
self.post_message_to_channel(
f"Failed to send email to {user_verbal_with_mention}. Exceeded limit for mails",
alert_group.slack_message.channel_id,
color="red",
)
elif (
log_record.notification_error_code
@ -52,18 +49,14 @@ class NotificationDeliveryStep(scenario_step.ScenarioStep):
self.post_message_to_channel(
f"Failed to send an SMS to {user_verbal_with_mention}. Phone number is not verified",
alert_group.slack_message.channel_id,
color="red",
)
elif log_record.notification_channel == UserNotificationPolicy.NotificationChannel.PHONE_CALL:
self.post_message_to_channel(
f"Failed to call to {user_verbal_with_mention}. Phone number is not verified",
alert_group.slack_message.channel_id,
color="red",
)
def post_message_to_channel(self, text, channel, color=None, footer=None):
# TODO: No color in blocks, use prefix emoji?
# color_id = self.get_color_id(color)
def post_message_to_channel(self, text, channel):
blocks = [
{
"type": "section",
@ -73,15 +66,6 @@ class NotificationDeliveryStep(scenario_step.ScenarioStep):
"text": text,
},
},
{"type": "divider"},
{
"type": "section",
"block_id": "alert",
"text": {
"type": "mrkdwn",
"text": footer,
},
},
]
try:
# TODO: slack-onprem, check exceptions

View file

@ -260,6 +260,9 @@ class User(models.Model):
# TODO: check whether this signal can be moved to save method of the model
@receiver(post_save, sender=User)
def listen_for_user_model_save(sender, instance, created, *args, **kwargs):
if created:
instance.notification_policies.create_default_policies_for_user(instance)
instance.notification_policies.create_important_policies_for_user(instance)
drop_cached_ical_for_custom_events_for_organization.apply_async(
(instance.organization_id,),
)

View file

@ -25,10 +25,10 @@ if [ -z "$CELERY_WORKER_MAX_TASKS_PER_CHILD" ]; then
fi
CELERY_ARGS=(
"--quiet" # --quite parameter removes pointless banner when celery starts
"-A" "engine"
"worker"
"-l" "info"
"--quiet" # --quite parameter removes pointless banner when celery starts
"--concurrency=$CELERY_WORKER_CONCURRENCY"
"--max-tasks-per-child=$CELERY_WORKER_MAX_TASKS_PER_CHILD"
"-Q" "$CELERY_WORKER_QUEUE"

View file

@ -1 +1,6 @@
from .exceptions import MaintenanceCouldNotBeStartedError, TeamCanNotBeChangedError, UnableToSendDemoAlert # noqa: F401
from .exceptions import ( # noqa: F401
MaintenanceCouldNotBeStartedError,
TeamCanNotBeChangedError,
UnableToSendDemoAlert,
UserNotificationPolicyCouldNotBeDeleted,
)

View file

@ -17,3 +17,7 @@ class TeamCanNotBeChangedError(OperationCouldNotBePerformedError):
class UnableToSendDemoAlert(OperationCouldNotBePerformedError):
pass
class UserNotificationPolicyCouldNotBeDeleted(OperationCouldNotBePerformedError):
pass

View file

@ -68,6 +68,7 @@ from apps.telegram.tests.factories import (
TelegramVerificationCodeFactory,
)
from apps.twilioapp.tests.factories import PhoneCallFactory, SMSFactory
from apps.user_management.models.user import User, listen_for_user_model_save
from apps.user_management.tests.factories import OrganizationFactory, TeamFactory, UserFactory
from common.constants.role import Role
@ -150,7 +151,9 @@ def make_organization():
@pytest.fixture
def make_user_for_organization():
def _make_user_for_organization(organization, role=Role.ADMIN, **kwargs):
post_save.disconnect(listen_for_user_model_save, sender=User)
user = UserFactory(organization=organization, role=role, **kwargs)
post_save.disconnect(listen_for_user_model_save, sender=User)
return user
return _make_user_for_organization

View file

@ -5,16 +5,14 @@ whitenoise==5.3.0
twilio~=6.37.0
phonenumbers==8.10.0
django-ordered-model==3.1.1
celery==4.3.0
celery==5.2.7
redis==3.2.0
django-celery-results==1.0.4
humanize==0.5.1
uwsgi==2.0.20
django-cors-headers==3.7.0
django-debug-toolbar==3.2.1
django-sns-view==0.1.2
kombu==4.5.0
python-telegram-bot==11.1.0
python-telegram-bot==13.13
django-silk==4.1.0
django-redis-cache==3.0.0
hiredis==1.0.0

View file

@ -75,7 +75,9 @@ SENDGRID_SECRET_KEY = os.environ.get("SENDGRID_SECRET_KEY")
SENDGRID_INBOUND_EMAIL_DOMAIN = os.environ.get("SENDGRID_INBOUND_EMAIL_DOMAIN")
# For Grafana Cloud integration
GRAFANA_CLOUD_ONCALL_API_URL = os.environ.get("GRAFANA_CLOUD_ONCALL_API_URL", "https://a-prod-us-central-0.grafana.net")
GRAFANA_CLOUD_ONCALL_API_URL = os.environ.get(
"GRAFANA_CLOUD_ONCALL_API_URL", "https://oncall-prod-us-central-0.grafana.net/oncall"
)
GRAFANA_CLOUD_ONCALL_TOKEN = os.environ.get("GRAFANA_CLOUD_ONCALL_TOKEN", None)
# Outgoing webhook settings
@ -130,7 +132,6 @@ INSTALLED_APPS = [
"apps.grafana_plugin",
"apps.grafana_plugin_management",
"apps.migration_tool",
"django_celery_results",
"corsheaders",
"debug_toolbar",
"social_django",

View file

@ -83,9 +83,6 @@ CELERY_TASK_ROUTES = {
"apps.alerts.tasks.create_contact_points_for_datasource.create_contact_points_for_datasource": {"queue": "default"},
"apps.alerts.tasks.sync_grafana_alerting_contact_points.sync_grafana_alerting_contact_points": {"queue": "default"},
"apps.alerts.tasks.delete_alert_group.delete_alert_group": {"queue": "default"},
"apps.alerts.tasks.invalidate_web_cache_for_alert_group.invalidate_web_cache_for_alert_group": {
"queue": "default"
}, # todo: remove
"apps.alerts.tasks.send_alert_group_signal.send_alert_group_signal": {"queue": "default"},
"apps.alerts.tasks.wipe.wipe": {"queue": "default"},
"apps.heartbeat.tasks.heartbeat_checkup": {"queue": "default"},

View file

@ -1,5 +1,38 @@
# Change Log
## v1.0.32 (2022-09-01)
- Bug fixes
## v1.0.31 (2022-09-01)
- Bump celery version
- Fix oss to cloud connection
## v1.0.30 (2022-08-31)
- Bug fix: check user notification policy before access
## v1.0.29 (2022-08-31)
- Add arm64 docker image
## v1.0.28 (2022-08-31)
- Bug fixes
## v1.0.27 (2022-08-30)
- Bug fixes
## v1.0.26 (2022-08-26)
- Insight logs' format fixes
- Remove UserNotificationPolicy auto-recreation
## v1.0.25 (2022-08-24)
- Bug fixes
## v1.0.24 (2022-08-24)
- Insight logs
- Default DATA_UPLOAD_MAX_MEMORY_SIZE to 1mb
## v1.0.23 (2022-08-23)
- Bug fixes
## v1.0.22 (2022-08-16)
- Make STATIC_URL configurable from environment variable
@ -59,7 +92,7 @@
## 1.0.2 (2022-06-17)
- Fix Grafana Alerting integration to handle API changes in Grafana 9
- Improve public api endpoint for for outgoing webhooks (/actions) by adding ability to create, update and delete outgoing webhook instance
- Improve public api endpoint for outgoing webhooks (/actions) by adding ability to create, update and delete outgoing webhook instance
## 1.0.0 (2022-06-14)

View file

@ -14,8 +14,8 @@ import { rootStore } from 'state';
import { useStore } from 'state/useStore';
import { useNavModel } from 'utils/hooks';
import './vars.css';
import './index.css';
import './style/vars.css';
import './style/index.css';
import { AppFeature } from './state/features';
@ -91,6 +91,7 @@ export const Root = observer((props: AppRootProps) => {
const pathWithoutLeadingSlash = path.replace(/^\//, '');
const store = useStore();
const { backendLicense } = store;
useEffect(() => {
store.updateBasicData();
@ -120,11 +121,13 @@ export const Root = observer((props: AppRootProps) => {
grafanaUser: window.grafanaBootData.user,
enableLiveSettings: store.hasFeature(AppFeature.LiveSettings),
enableCloudPage: store.hasFeature(AppFeature.CloudConnection),
backendLicense,
}),
[meta, pathWithoutLeadingSlash, page, store.features]
)
);
useEffect(() => {
/* @ts-ignore */
onNavChanged(navModel);
}, [navModel, onNavChanged]);

View file

@ -0,0 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="#F5B73D">
<path d="M8 .2l4.9 15.2L0 6h16L3.1 15.4z"/>
</svg>

After

Width:  |  Height:  |  Size: 132 B

View file

@ -0,0 +1,21 @@
/* Container for the nav bar subtitle: subtitle text and GitHub card side by side. */
.root {
display: flex;
align-items: center;
}
/* Gap between the star icon and the "Star us on GitHub" label. */
.navbar-star-icon {
margin-right: 4px;
}
/* Bordered pill around the GitHub link card. */
.navbar-heading {
padding: 4px;
margin: 0 0 0 8px;
border: 1px solid var(--gray-9);
width: initial;
font-size: 12px;
}
/* Keeps icon and link text vertically centered inside the card. */
.navbar-link {
display: flex;
align-items: center;
}

View file

@ -0,0 +1,30 @@
import React from 'react';
import { Card } from '@grafana/ui';
import cn from 'classnames/bind';
import gitHubStarSVG from 'assets/img/github_star.svg';
import { APP_SUBTITLE, GRAFANA_LICENSE_OSS } from 'utils/consts';
import styles from './NavBarSubtitle.module.css';
const cx = cn.bind(styles);
/**
 * Nav bar subtitle for the OnCall plugin.
 * For OSS-licensed backends (backendLicense === GRAFANA_LICENSE_OSS) it renders the
 * standard app subtitle plus a "Star us on GitHub" card; otherwise the subtitle alone.
 */
function NavBarSubtitle({ backendLicense }: { backendLicense: string }) {
if (backendLicense === GRAFANA_LICENSE_OSS) {
return (
<div className={cx('root')}>
{APP_SUBTITLE}
<Card heading={undefined} className={cx('navbar-heading')}>
{/* rel="noreferrer" protects the opener page when the link opens in a new tab */}
<a href="https://github.com/grafana/oncall" className={cx('navbar-link')} target="_blank" rel="noreferrer">
<img src={gitHubStarSVG} className={cx('navbar-star-icon')} alt="" /> Star us on GitHub
</a>
</Card>
</div>
);
}
// Non-OSS installs get the plain subtitle with no GitHub promotion.
return <>{APP_SUBTITLE}</>;
}
export default NavBarSubtitle;

View file

@ -1,5 +1,6 @@
.root {
position: relative;
width: 100%;
}
.scroller {

View file

@ -12,24 +12,27 @@ const cx = cn.bind(styles);
interface SourceCodeProps {
noMaxHeight?: boolean;
showCopyToClipboard?: boolean;
children?: any
}
const SourceCode: FC<SourceCodeProps> = (props) => {
const { children, noMaxHeight = false } = props;
const { children, noMaxHeight = false, showCopyToClipboard = true } = props;
return (
<div className={cx('root')}>
<CopyToClipboard
text={children as string}
onCopy={() => {
openNotification('Copied!');
}}
>
<Button className={cx('button')} variant="primary" icon="copy">
Copy
</Button>
</CopyToClipboard>
{showCopyToClipboard && (
<CopyToClipboard
text={children as string}
onCopy={() => {
openNotification('Copied!');
}}
>
<Button className={cx('button')} variant="primary" icon="copy">
Copy
</Button>
</CopyToClipboard>
)}
<pre
className={cx('scroller', {
'scroller_max-height': !noMaxHeight,

View file

@ -3,13 +3,14 @@ import React, { FC, useEffect, useState, useCallback } from 'react';
import { AppRootProps } from '@grafana/data';
import { getLocationSrv } from '@grafana/runtime';
import { Alert, Button } from '@grafana/ui';
import { Alert } from '@grafana/ui';
import cn from 'classnames/bind';
import { observer } from 'mobx-react';
import PluginLink from 'components/PluginLink/PluginLink';
import { useStore } from 'state/useStore';
import { UserAction } from 'state/userAction';
import { GRAFANA_LICENSE_OSS } from 'utils/consts';
import { useForceUpdate } from 'utils/hooks';
import { getItem, setItem } from 'utils/localStorage';
import sanitize from 'utils/sanitize';
@ -91,11 +92,17 @@ const DefaultPageLayout: FC<DefaultPageLayoutProps> = observer((props) => {
/>
</Alert>
)}
{store.backendLicense === 'OpenSource' &&
{store.backendLicense === GRAFANA_LICENSE_OSS &&
store.backendVersion &&
plugin?.version &&
store.backendVersion !== plugin?.version && (
<Alert className={styles.alert} severity="warning" title={'Version mismatch!'}>
store.backendVersion !== plugin?.version &&
!getItem(`version_mismatch_${store.backendVersion}_${plugin?.version}`) && (
<Alert
className={styles.alert}
severity="warning"
title={'Version mismatch!'}
onRemove={getRemoveAlertHandler(`version_mismatch_${store.backendVersion}_${plugin?.version}`)}
>
Please make sure you have the same versions of the Grafana OnCall plugin and the Grafana OnCall engine,
otherwise there could be issues with your Grafana OnCall installation!
<br />
@ -108,16 +115,6 @@ const DefaultPageLayout: FC<DefaultPageLayoutProps> = observer((props) => {
.
</Alert>
)}
{currentTeam?.limits.show_limits_warning &&
currentTeam?.limits.period_title !== 'Version mismatch' && // don't show version mismatch warning twice
!getItem(currentTeam.limits.warning_text) && (
<Alert
className={styles.alert}
severity="warning"
title={currentTeam?.limits.warning_text}
onRemove={getRemoveAlertHandler(currentTeam?.limits.warning_text)}
/>
)}
{Boolean(
currentTeam &&
currentUser &&

View file

@ -25,7 +25,7 @@ import WithConfirm from 'components/WithConfirm/WithConfirm';
import logo from 'img/logo.svg';
import { makeRequest } from 'network';
import { createGrafanaToken, getPluginSyncStatus, startPluginSync, updateGrafanaToken } from 'state/plugin';
import { openNotification } from 'utils';
import { GRAFANA_LICENSE_OSS } from 'utils/consts';
import { getItem, setItem } from 'utils/localStorage';
import styles from './PluginConfigPage.module.css';
@ -152,7 +152,7 @@ export const PluginConfigPage = (props: Props) => {
setPluginStatusMessage(
`Connected to OnCall${versionInfo}\n - OnCall URL: ${plugin.meta.jsonData.onCallApiUrl}\n - Grafana URL: ${plugin.meta.jsonData.grafanaUrl}`
);
setIsSelfHostedInstall(plugin.meta.jsonData?.license === 'OpenSource');
setIsSelfHostedInstall(plugin.meta.jsonData?.license === GRAFANA_LICENSE_OSS);
setPluginStatusOk(true);
} else {
setPluginStatusMessage(

View file

@ -147,12 +147,16 @@ const TelegramModal = (props: TelegramModalProps) => {
<Icon name="copy" />
</CopyToClipboard>
</span>{' '}
, to the channel.
, to the channel and wait for the confirmation message.
</Text>
</div>
<div className={cx('telegram-instruction-container')}>
<Text>8. Make sure users connect to Telegram in their OnCall user profile.</Text>
<Text>8. Make sure users connect their Telegram accounts in their OnCall user profile.</Text>
</div>
<div className={cx('telegram-instruction-container')}>
<Text>9. Done! Now you can manage alerts in your Telegram workspace.</Text>
</div>
<div className={cx('telegram-instruction-container')}>

View file

@ -5,6 +5,7 @@ import cn from 'classnames/bind';
import PluginLink from 'components/PluginLink/PluginLink';
import Text from 'components/Text/Text';
import WithConfirm from 'components/WithConfirm/WithConfirm';
import { UserSettingsTab } from 'containers/UserSettings/UserSettings.types';
import { User } from 'models/user/user.types';
import { useStore } from 'state/useStore';
@ -32,6 +33,10 @@ const SlackConnector = (props: SlackConnectorProps) => {
onTabChange(UserSettingsTab.SlackInfo);
}, []);
const handleUnlinkSlackAccount = useCallback(() => {
userStore.unlinkSlack(userStore.currentUserPk);
}, []);
return (
<div className={cx('user-item')}>
<Label>Slack username:</Label>
@ -39,6 +44,11 @@ const SlackConnector = (props: SlackConnectorProps) => {
{storeUser.slack_user_identity ? (
<div>
<Text type="secondary"> Slack account is connected</Text>
<WithConfirm title="Are you sure to disconnect Slack account?" confirmText="Disconnect">
<Button size="sm" fill="text" variant="destructive" onClick={handleUnlinkSlackAccount}>
Unlink Slack account
</Button>
</WithConfirm>
</div>
) : teamStore.currentTeam?.slack_team_identity ? (
<div>

View file

@ -33,3 +33,7 @@
white-space: nowrap;
}
}
.page-header__info-block {
flex-grow: 1; /* Stretch the navigation subtitle panel */
}

View file

@ -298,6 +298,10 @@ export class AlertGroupStore extends BaseStore {
});
}
// Fetches the raw payload for a single alert by primary key.
// The response is not cached in this store; callers (e.g. the incident page's
// payload modal) consume it directly — presumably { id, raw_request_data, ... },
// matching the shape the incident page expects; verify against the backend API.
async getPayloadForIncident(pk: Alert['pk']) {
return await makeRequest(`/alerts/${pk}`, {});
}
@action
async getNewIncidentsStats() {
const result = await makeRequest(`${this.path}stats/`, {

View file

@ -37,12 +37,18 @@ export interface TimeLineItem {
type: number;
}
export interface GroupedAlert {
created_at: string;
id: string;
render_for_web: RenderForWeb;
}
export interface Alert {
pk: string;
title: string;
message: string;
image_url: string;
alerts?: any[];
alerts?: GroupedAlert[];
acknowledged: boolean;
created_at: string;
acknowledged_at: string;
@ -53,11 +59,7 @@ export interface Alert {
related_users: User[];
render_after_resolve_report_json?: TimeLineItem[];
render_for_slack: { attachments: any[] };
render_for_web: {
message: any;
title: any;
image_url: string;
};
render_for_web: RenderForWeb;
alerts_count: number;
inside_organization_number: number;
resolved: boolean;
@ -83,3 +85,9 @@ export interface Alert {
has_pormortem?: boolean; // not implemented yet
}
interface RenderForWeb {
message: any;
title: any;
image_url: string;
}

View file

@ -124,6 +124,20 @@ export class UserStore extends BaseStore {
return await makeRequest(`/users/${userPk}/get_backend_verification_code/?backend=${backend}`, {});
};
@action
// Disconnects the user's Slack identity on the backend, then refreshes the
// local copy of the user so observers see the updated state.
unlinkSlack = async (userPk: User['pk']) => {
await makeRequest(`/users/${userPk}/unlink_slack/`, {
method: 'POST',
});
// Re-fetch and replace the items map immutably so MobX/React re-render.
const user = await this.getById(userPk);
this.items = {
...this.items,
[user.pk]: user,
};
};
@action
unlinkTelegram = async (userPk: User['pk']) => {
await makeRequest(`/users/${userPk}/unlink_telegram/`, {

View file

@ -2,6 +2,18 @@
margin-top: 24px;
}
/* Two-column layout for a grouped-alert row: title/timestamp left, actions right. */
.incident-row {
display: flex;
}
/* Left column absorbs remaining width, pushing actions to the right edge. */
.incident-row-left {
flex-grow: 1;
}
/* Spacing under the title line inside the payload modal. */
.payload-subtitle {
margin-bottom: 16px;
}
.info-row {
width: 100%;
border-bottom: 1px solid rgba(204, 204, 220, 0.15);
@ -44,6 +56,7 @@
.collapse {
margin-top: 16px;
position: relative;
}
.column {

View file

@ -1,4 +1,4 @@
import React, { SyntheticEvent } from 'react';
import React, { useState, SyntheticEvent } from 'react';
import { AppRootProps } from '@grafana/data';
import { getLocationSrv } from '@grafana/runtime';
@ -13,6 +13,8 @@ import {
ToolbarButton,
VerticalGroup,
Field,
Modal,
Tooltip,
} from '@grafana/ui';
import cn from 'classnames/bind';
import { observer } from 'mobx-react';
@ -26,6 +28,7 @@ import Block from 'components/GBlock/Block';
import IntegrationLogo from 'components/IntegrationLogo/IntegrationLogo';
import WrongTeamStub from 'components/NotFoundInTeam/WrongTeamStub';
import PluginLink from 'components/PluginLink/PluginLink';
import SourceCode from 'components/SourceCode/SourceCode';
import Text from 'components/Text/Text';
import AttachIncidentForm from 'containers/AttachIncidentForm/AttachIncidentForm';
import IntegrationSettings from 'containers/IntegrationSettings/IntegrationSettings';
@ -37,9 +40,11 @@ import {
AlertAction,
TimeLineItem,
TimeLineRealm,
GroupedAlert,
} from 'models/alertgroup/alertgroup.types';
import { ResolutionNoteSourceTypesToDisplayName } from 'models/resolution_note/resolution_note.types';
import { WithStoreProps } from 'state/types';
import { useStore } from 'state/useStore';
import { UserAction } from 'state/userAction';
import { withMobXProviderContext } from 'state/withStore';
import { openNotification } from 'utils';
@ -129,8 +134,6 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
const { alertReceiveChannelStore } = store;
const { isMobile } = store;
const { alerts } = store.alertGroupStore;
const incident = alerts.get(id);
@ -174,17 +177,15 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
);
}
const integration = store.alertReceiveChannelStore.getIntegration(incident.alert_receive_channel);
return (
<>
<div className={cx('root')}>
{this.renderHeader()}
<div className={cx('content')}>
<div className={cx('column')}>
{this.renderIncident(incident)}
{this.renderGroupedIncidents()}
{this.renderAttachedIncidents()}
<Incident incident={incident} datetimeReference={this.getIncidentDatetimeReference(incident)} />
<GroupedIncidentsList id={incident.pk} getIncidentDatetimeReference={this.getIncidentDatetimeReference} />
<AttachedIncidentsList id={incident.pk} getUnattachClickHandler={this.getUnattachClickHandler} />
</div>
<div className={cx('column')}>{this.renderTimeline()}</div>
</div>
@ -335,112 +336,6 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
this.setState({ showAttachIncidentForm: true });
};
renderIncident = (incident: Alert) => {
let datetimeReference;
if (incident.last_alert_at || incident.created_at) {
const m = moment(incident.last_alert_at || incident.created_at);
datetimeReference = `(${m.fromNow()}, ${m.toString()})`;
}
return (
<div key={incident.pk} className={cx('incident')}>
<HorizontalGroup wrap>
<Text.Title type="secondary" level={4}>
{incident.inside_organization_number
? `#${incident.inside_organization_number} ${incident.render_for_web.title}`
: incident.render_for_web.title}
</Text.Title>
<Text type="secondary">{datetimeReference}</Text>
</HorizontalGroup>
<div
className={cx('message')}
dangerouslySetInnerHTML={{
__html: sanitize(incident.render_for_web.message),
}}
/>
{incident.render_for_web.image_url && <img className={cx('image')} src={incident.render_for_web.image_url} />}
</div>
);
};
renderGroupedIncidents() {
const {
store,
query: { id },
} = this.props;
const incident = store.alertGroupStore.alerts.get(id);
const alerts = incident.alerts;
if (!alerts) {
return null;
}
const latestAlert = alerts[alerts.length - 1];
const latestAlertMoment = moment(latestAlert.created_at);
return (
<Collapse
headerWithBackground
className={cx('collapse')}
isOpen
label={
<HorizontalGroup wrap>
{incident.alerts_count} Grouped Alerts
<Text type="secondary">
(latest {latestAlertMoment.fromNow()}, {latestAlertMoment.toString()})
</Text>
</HorizontalGroup>
}
contentClassName={cx('incidents-content')}
>
{alerts.map(this.renderIncident)}
</Collapse>
);
}
renderAttachedIncidents = () => {
const {
store,
query: { id },
} = this.props;
const incident = store.alertGroupStore.alerts.get(id);
if (!incident.dependent_alert_groups.length) {
return null;
}
const alerts = incident.dependent_alert_groups;
return (
<Collapse
headerWithBackground
className={cx('collapse')}
isOpen
label={<HorizontalGroup wrap>{incident.dependent_alert_groups.length} Attached Incidents</HorizontalGroup>}
contentClassName={cx('incidents-content')}
>
{alerts.map((incident) => {
return (
<HorizontalGroup key={incident.pk} justify={'space-between'}>
<PluginLink query={{ page: 'incident', id: incident.pk }}>
#{incident.inside_organization_number} {incident.render_for_web.title}
</PluginLink>
{/* <Emoji text={incident.alert_receive_channel?.verbal_name || ''} />*/}
<WithPermissionControl userAction={UserAction.UpdateIncidents}>
<Button size="sm" onClick={this.getUnattachClickHandler(incident.pk)} variant="secondary">
Unattach
</Button>
</WithPermissionControl>
</HorizontalGroup>
);
})}
</Collapse>
);
};
getUnattachClickHandler = (pk: Alert['pk']) => {
const { store } = this.props;
@ -614,6 +509,192 @@ class IncidentPage extends React.Component<IncidentPageProps, IncidentPageState>
store.alertGroupStore.doIncidentAction(alert.pk, AlertAction.unSilence, false);
};
};
// Builds a human-readable timestamp suffix such as "(2 hours ago, <date>)" for an
// alert group or grouped alert, preferring last_alert_at over created_at.
// Returns undefined when neither timestamp is present.
getIncidentDatetimeReference = (incident: Alert | GroupedAlert): string => {
const timestamp = (incident as Alert).last_alert_at || incident.created_at;
if (!timestamp) {
return undefined;
}
const parsed = moment(timestamp);
return `(${parsed.fromNow()}, ${parsed.toString()})`;
};
}
/**
 * Presentational card for a single alert group: numbered title, relative
 * timestamp, HTML-rendered message and an optional image.
 */
function Incident({ incident, datetimeReference }: { incident: Alert; datetimeReference: string }) {
return (
<div key={incident.pk} className={cx('incident')}>
<HorizontalGroup wrap={false}>
<Text.Title type="secondary" level={4}>
{/* Prefix the per-organization number when the backend has assigned one. */}
{incident.inside_organization_number
? `#${incident.inside_organization_number} ${incident.render_for_web.title}`
: incident.render_for_web.title}
</Text.Title>
<Text type="secondary">{datetimeReference}</Text>
</HorizontalGroup>
<div
className={cx('message')}
dangerouslySetInnerHTML={{
// Backend-rendered HTML; run through sanitize() before injection to prevent XSS.
__html: sanitize(incident.render_for_web.message),
}}
/>
{incident.render_for_web.image_url && <img className={cx('image')} src={incident.render_for_web.image_url} />}
</div>
);
}
/**
 * Collapsible list of the alerts grouped under one alert group.
 * Looks the alert group up in the store by id and renders one GroupedIncident
 * per grouped alert; renders nothing until the detailed alerts are loaded.
 */
function GroupedIncidentsList({
id,
getIncidentDatetimeReference,
}: {
id: string;
getIncidentDatetimeReference: (incident: GroupedAlert) => string;
}) {
const store = useStore();
const incident = store.alertGroupStore.alerts.get(id);
const alerts = incident.alerts;
// Grouped alerts may be absent until the detailed alert group is fetched.
if (!alerts) {
return null;
}
// Alerts appear to arrive oldest-first; the last entry is the most recent one.
const latestAlert = alerts[alerts.length - 1];
const latestAlertMoment = moment(latestAlert.created_at);
return (
<Collapse
headerWithBackground
className={cx('collapse')}
isOpen
label={
<HorizontalGroup wrap>
{incident.alerts_count} Grouped Alerts
<Text type="secondary">
(latest {latestAlertMoment.fromNow()}, {latestAlertMoment.toString()})
</Text>
</HorizontalGroup>
}
contentClassName={cx('incidents-content')}
>
{/* Fix: list children need a stable `key` for correct React reconciliation. */}
{alerts.map((alert) => (
<GroupedIncident key={alert.id} incident={alert} datetimeReference={getIncidentDatetimeReference(alert)} />
))}
</Collapse>
);
}
/**
 * One alert inside the "Grouped Alerts" collapse, with an action to inspect
 * the raw payload the backend received for it in a modal.
 */
function GroupedIncident({ incident, datetimeReference }: { incident: GroupedAlert; datetimeReference: string }) {
const store = useStore();
// Raw payload is fetched lazily — only when the user asks to see it.
const [incidentRawResponse, setIncidentRawResponse] = useState<{ id: string; raw_request_data: any }>(undefined);
const [isModalOpen, setIsModalOpen] = useState(false);
// Safe to dereference: openIncidentResponse sets the response before opening the modal.
const payloadJSON = isModalOpen ? JSON.stringify(incidentRawResponse.raw_request_data, null, 4) : undefined;
return (
<>
{isModalOpen && (
<Modal onDismiss={() => setIsModalOpen(false)} closeOnEscape isOpen={isModalOpen} title="Alert Payload">
<div className={cx('payload-subtitle')}>
<HorizontalGroup>
<Text type="secondary">
{incident.render_for_web.title} - {datetimeReference}
</Text>
</HorizontalGroup>
</div>
<VerticalGroup>
{/* A standalone copy button is rendered below, so suppress SourceCode's built-in one. */}
<SourceCode showCopyToClipboard={false}>{payloadJSON}</SourceCode>
<HorizontalGroup justify={'flex-end'}>
<CopyToClipboard
text={payloadJSON}
onCopy={() => {
openNotification('Copied!');
}}
>
<Button className={cx('button')} variant="primary" icon="copy">
Copy to Clipboard
</Button>
</CopyToClipboard>
</HorizontalGroup>
</VerticalGroup>
</Modal>
)}
<div key={incident.id}>
<div className={cx('incident-row')}>
<div className={cx('incident-row-left')}>
<HorizontalGroup wrap={false} justify={'flex-start'}>
<Text.Title type="secondary" level={4}>
{incident.render_for_web.title}
</Text.Title>
<Text type="secondary">{datetimeReference}</Text>
</HorizontalGroup>
</div>
<div className={cx('incident-row-right')}>
<HorizontalGroup wrap={false} justify={'flex-end'}>
<Tooltip placement="top" content="Alert Payload">
<IconButton name="arrow" onClick={() => openIncidentResponse(incident)} />
</Tooltip>
</HorizontalGroup>
</div>
</div>
<div
className={cx('message')}
dangerouslySetInnerHTML={{
// Backend-rendered HTML; sanitize() guards against XSS before injection.
__html: sanitize(incident.render_for_web.message),
}}
/>
{incident.render_for_web.image_url && <img className={cx('image')} src={incident.render_for_web.image_url} />}
</div>
</>
);
// Fetch the raw payload, then open the modal; order matters because payloadJSON
// reads incidentRawResponse as soon as isModalOpen flips to true.
async function openIncidentResponse(incident: GroupedAlert) {
const currentIncidentRawResponse = await store.alertGroupStore.getPayloadForIncident(incident.id);
setIncidentRawResponse(currentIncidentRawResponse);
setIsModalOpen(true);
}
}
/**
 * Collapsible list of alert groups attached to (dependent on) this alert group,
 * each with a link to its own page and an "Unattach" action.
 * Renders nothing when there are no dependent alert groups.
 */
function AttachedIncidentsList({
id,
getUnattachClickHandler,
}: {
id: string;
getUnattachClickHandler(pk: string): void;
}) {
const store = useStore();
const incident = store.alertGroupStore.alerts.get(id);
if (!incident.dependent_alert_groups.length) {
return null;
}
const alerts = incident.dependent_alert_groups;
return (
<Collapse
headerWithBackground
className={cx('collapse')}
isOpen
label={<HorizontalGroup wrap>{incident.dependent_alert_groups.length} Attached Incidents</HorizontalGroup>}
contentClassName={cx('incidents-content')}
>
{/* Note: each mapped `incident` here shadows the outer alert group variable. */}
{alerts.map((incident) => {
return (
<HorizontalGroup key={incident.pk} justify={'space-between'}>
<PluginLink query={{ page: 'incident', id: incident.pk }}>
#{incident.inside_organization_number} {incident.render_for_web.title}
</PluginLink>
{/* <Emoji text={incident.alert_receive_channel?.verbal_name || ''} />*/}
<WithPermissionControl userAction={UserAction.UpdateIncidents}>
{/* NOTE(review): the class version used onClick={this.getUnattachClickHandler(pk)},
suggesting getUnattachClickHandler RETURNS a handler; if so, this wrapper calls it
but never invokes the returned handler — verify against its implementation. */}
<Button size="sm" onClick={() => getUnattachClickHandler(incident.pk)} variant="secondary">
Unattach
</Button>
</WithPermissionControl>
</HorizontalGroup>
);
})}
</Collapse>
);
}
export default withMobXProviderContext(IncidentPage);

View file

@ -103,7 +103,6 @@ class Incidents extends React.Component<IncidentsPageProps, IncidentsPageState>
renderIncidentFilters() {
const { query } = this.props;
const { filters } = this.state;
return (
<div className={cx('filters')}>

View file

@ -3,5 +3,7 @@ import plugin from '../../package.json'; // eslint-disable-line
export const APP_TITLE = 'Grafana OnCall';
export const APP_SUBTITLE = `Developer-friendly incident response (${plugin?.version})`;
export const GRAFANA_LICENSE_OSS = 'OpenSource';
// Reusable breakpoint sizes
export const BREAKPOINT_TABS = 1024;

View file

@ -1,11 +1,12 @@
import { useEffect, useRef, useState } from 'react';
import React, { useEffect, useRef, useState } from 'react';
import { useMemo } from 'react';
import { AppRootProps, NavModelItem } from '@grafana/data';
import NavBarSubtitle from 'components/NavBar/NavBarSubtitle';
import { PageDefinition } from 'pages';
import { APP_TITLE, APP_SUBTITLE } from './consts';
import { APP_TITLE } from './consts';
type Args = {
meta: AppRootProps['meta'];
@ -17,6 +18,7 @@ type Args = {
};
enableLiveSettings: boolean;
enableCloudPage: boolean;
backendLicense: string;
};
export function useForceUpdate() {
@ -24,7 +26,16 @@ export function useForceUpdate() {
return () => setValue((value) => value + 1);
}
export function useNavModel({ meta, pages, path, page, grafanaUser, enableLiveSettings, enableCloudPage }: Args) {
export function useNavModel({
meta,
pages,
path,
page,
grafanaUser,
enableLiveSettings,
enableCloudPage,
backendLicense,
}: Args) {
return useMemo(() => {
const tabs: NavModelItem[] = [];
@ -54,7 +65,7 @@ export function useNavModel({ meta, pages, path, page, grafanaUser, enableLiveSe
const node = {
text: APP_TITLE,
img: meta.info.logos.large,
subTitle: APP_SUBTITLE,
subTitle: <NavBarSubtitle backendLicense={backendLicense} />,
url: path,
children: tabs,
};

View file

@ -8,13 +8,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.0.3
version: 1.0.4
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v1.0.13"
appVersion: "v1.0.32"
dependencies:
- name: cert-manager
version: v1.8.0