Merge remote-tracking branch 'origin/matiasb/fix-final-schedule-event-splitting' into new-schedules

This commit is contained in:
Maxim 2022-08-17 15:39:29 +03:00
commit b0d22de463
22 changed files with 672 additions and 77 deletions

View file

@ -33,20 +33,6 @@ steps:
- zip -r grafana-oncall-app.zip ./grafana-oncall-app
- if [ -z "$DRONE_TAG" ]; then echo "No tag, skipping archive"; else cp grafana-oncall-app.zip grafana-oncall-app-${DRONE_TAG}.zip; fi
- name: Publish Plugin to GCS (release)
image: plugins/gcs
settings:
acl: allUsers:READER
source: grafana-plugin/ci/dist/grafana-oncall-app-${DRONE_TAG}.zip
target: grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip
token:
from_secret: gcs_oncall_publisher_key
depends_on:
- Sign and Package Plugin
when:
ref:
- refs/tags/v*.*.*
- name: Publish Plugin to Github (release)
image: plugins/github-release
settings:
@ -60,16 +46,16 @@ steps:
ref:
- refs/tags/v*.*.*
- name: Publish Plugin to grafana.com (release)
image: curlimages/curl:7.73.0
environment:
GRAFANA_API_KEY:
from_secret: gcom_plugin_publisher_api_key
commands:
- "curl -f -s -H \"Authorization: Bearer $${GRAFANA_API_KEY}\" -d \"download[any][url]=https://storage.googleapis.com/grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip\" -d \"download[any][md5]=$$(curl -sL https://storage.googleapis.com/grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip | md5sum | cut -d' ' -f1)\" -d url=https://github.com/grafana/oncall/grafana-plugin https://grafana.com/api/plugins"
- name: Publish Plugin to GCS (release)
image: plugins/gcs
settings:
acl: allUsers:READER
source: grafana-plugin/ci/dist/grafana-oncall-app-${DRONE_TAG}.zip
target: grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip
token:
from_secret: gcs_oncall_publisher_key
depends_on:
- Publish Plugin to GCS (release)
- Publish Plugin to Github (release)
- Sign and Package Plugin
when:
ref:
- refs/tags/v*.*.*
@ -142,7 +128,6 @@ steps:
when:
ref:
- refs/heads/dev
- refs/tags/v*.*.*
# Services for Unit Test Backend
services:
@ -170,6 +155,114 @@ trigger:
- refs/heads/dev
- refs/tags/v*.*.*
---
kind: pipeline
type: docker
name: OSS Release
steps:
- name: Check Promote
image: alpine
commands:
- if [ -z "$DRONE_DEPLOY_TO" ]; then echo "Missing DRONE_DEPLOY_TO (Target)"; exit 1; fi
- if [ -z "$DRONE_TAG" ]; then echo "Missing DRONE_TAG"; exit 1; fi
- echo Promoting $DRONE_TAG to $DRONE_DEPLOY_TO
- name: Build Plugin
image: node:14.6.0-stretch
commands:
- apt-get update
- apt-get --assume-yes install jq
- cd grafana-plugin/
- if [ -z "$DRONE_TAG" ]; then echo "No tag, not modifying version"; else jq '.version="${DRONE_TAG}"' package.json > package.new && mv package.new package.json && jq '.version' package.json; fi
- yarn --network-timeout 500000
- yarn build
- ls ./
depends_on:
- Check Promote
when:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
- name: Sign and Package Plugin
image: node:14.6.0-stretch
environment:
GRAFANA_API_KEY:
from_secret: gcom_plugin_publisher_api_key
depends_on:
- Build Plugin
commands:
- apt-get update
- apt-get install zip
- cd grafana-plugin
- yarn sign
- yarn ci-build:finish
- yarn ci-package
- cd ci/dist
- zip -r grafana-oncall-app.zip ./grafana-oncall-app
- if [ -z "$DRONE_TAG" ]; then echo "No tag, skipping archive"; else cp grafana-oncall-app.zip grafana-oncall-app-${DRONE_TAG}.zip; fi
- name: Publish Plugin to grafana.com (release)
image: curlimages/curl:7.73.0
environment:
GRAFANA_API_KEY:
from_secret: gcom_plugin_publisher_api_key
commands:
- "curl -f -s -H \"Authorization: Bearer $${GRAFANA_API_KEY}\" -d \"download[any][url]=https://storage.googleapis.com/grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip\" -d \"download[any][md5]=$$(curl -sL https://storage.googleapis.com/grafana-oncall-app/releases/grafana-oncall-app-${DRONE_TAG}.zip | md5sum | cut -d' ' -f1)\" -d url=https://github.com/grafana/oncall/grafana-plugin https://grafana.com/api/plugins"
depends_on:
- Sign and Package Plugin
- name: Image Tag
image: alpine
commands:
- apk add --no-cache bash git sed
- git fetch origin --tags
- chmod +x ./tools/image-tag.sh
- echo $(./tools/image-tag.sh)
- echo $(./tools/image-tag.sh) > .tags
- if [ -z "$DRONE_TAG" ]; then echo "No tag, not modifying version"; else sed "0,/VERSION.*/ s/VERSION.*/VERSION = \"${DRONE_TAG}\"/g" engine/settings/base.py > engine/settings/base.temp && mv engine/settings/base.temp engine/settings/base.py; fi
- cat engine/settings/base.py | grep VERSION | head -1
depends_on:
- Check Promote
when:
event:
- promote
target:
- oss
ref:
- refs/tags/v*.*.*
- name: Build and Push Engine Docker Image Backend to Dockerhub
image: plugins/docker
settings:
repo: grafana/oncall
dockerfile: engine/Dockerfile
context: engine/
password:
from_secret: docker_password
username:
from_secret: docker_username
depends_on:
- Image Tag
- name: Unrecognized Promote Target
image: alpine
commands:
- echo $DRONE_DEPLOY_TO is not a recognized promote target!
- exit 1
when:
target:
exclude:
- oss
trigger:
event:
- promote
---
# Secret for pulling docker images.
kind: secret
@ -241,6 +334,6 @@ kind: secret
name: drone_token
---
kind: signature
hmac: 7621bb1ccfcbec9f92c385670f2b2790859aba25f31c4936997123906fb102c0
hmac: a74dd831a3d0a87b8fc1db45699a6a834ea769da9f437c55979ae665948c3b3f
...

View file

@ -65,3 +65,36 @@ jobs:
cd engine/
pip install -r requirements.txt
./wait_for_test_mysql_start.sh && pytest --ds=settings.ci-test -x
  # Runs the backend unit tests against a PostgreSQL database; DB_BACKEND
  # selects the engine in settings.ci-test (the MySQL variant of this job,
  # using wait_for_test_mysql_start.sh, is defined just above in this file).
  unit-test-backend-postgresql:
    runs-on: ubuntu-latest
    container: python:3.9
    env:
      DB_BACKEND: postgresql
      DJANGO_SETTINGS_MODULE: settings.ci-test
      SLACK_CLIENT_OAUTH_ID: 1
    services:
      # Celery broker required by the engine under test.
      rabbit_test:
        image: rabbitmq:3.7.19
        env:
          RABBITMQ_DEFAULT_USER: rabbitmq
          RABBITMQ_DEFAULT_PASS: rabbitmq
      postgresql_test:
        image: postgres:14.4
        env:
          POSTGRES_DB: oncall_local_dev
          POSTGRES_PASSWORD: local_dev_pwd
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - uses: actions/checkout@v2
      - name: Unit Test Backend
        run: |
          cd engine/
          pip install -r requirements.txt
          pytest --ds=settings.ci-test -x

View file

@ -1,5 +1,14 @@
# Change Log
## v1.0.22 (2022-08-16)
- Make STATIC_URL configurable from environment variable
## v1.0.21 (2022-08-12)
- Bug fixes
## v1.0.19 (2022-08-10)
- Bug fixes
## v1.0.15 (2022-08-03)
- Bug fixes

View file

@ -31,6 +31,8 @@
docker-compose -f docker-compose-developer.yml up -d
```
NOTE: to use a PostgreSQL db backend, use the `docker-compose-developer-pg.yml` file instead.
2. Prepare a python environment:
```bash
# Create and activate the virtual environment
@ -45,6 +47,9 @@ pip install -U pip wheel
# Copy and check .env file.
cp .env.example .env
# NOTE: if you want to use the PostgreSQL db backend add DB_BACKEND=postgresql to your .env file;
# currently allowed backend values are `mysql` (default) and `postgresql`
# Apply .env to current terminal.
# For PyCharm it's better to use https://plugins.jetbrains.com/plugin/7861-envfile/
export $(grep -v '^#' .env | xargs -0)

View file

@ -76,6 +76,12 @@ See [Grafana docs](https://grafana.com/docs/grafana/latest/administration/plugin
<a href="https://github.com/grafana/oncall/discussions"><img width="200px" src="docs/img/GH_discussions.png"></a>
<a href="https://slack.grafana.com/"><img width="200px" src="docs/img/slack.png"></a>
## Stargazers over time
[![Stargazers over time](https://starchart.cc/grafana/oncall.svg)](https://starchart.cc/grafana/oncall)
## Further Reading
- *Migration from the PagerDuty* - [Migrator](https://github.com/grafana/oncall/tree/dev/tools/pagerduty-migrator)
- *Documentation* - [Grafana OnCall](https://grafana.com/docs/grafana-cloud/oncall/)

View file

@ -0,0 +1,74 @@
version: '3.2'

services:
  # PostgreSQL backend for the OnCall engine (development only).
  postgres:
    image: postgres:14.4
    platform: linux/x86_64
    mem_limit: 500m
    cpus: 0.5
    restart: always
    ports:
      # Port pairs quoted so YAML cannot misparse "host:container" scalars.
      - "5432:5432"
    environment:
      POSTGRES_DB: oncall_local_dev
      POSTGRES_PASSWORD: empty
      POSTGRES_INITDB_ARGS: '--encoding=UTF-8'
  # Cache backend.
  redis:
    image: redis
    mem_limit: 100m
    cpus: 0.1
    restart: always
    ports:
      - "6379:6379"
  # Message broker for celery workers (management UI on 15672).
  rabbit:
    image: "rabbitmq:3.7.15-management"
    mem_limit: 1000m
    cpus: 0.5
    environment:
      RABBITMQ_DEFAULT_USER: "rabbitmq"
      RABBITMQ_DEFAULT_PASS: "rabbitmq"
      RABBITMQ_DEFAULT_VHOST: "/"
    ports:
      - "15672:15672"
      - "5672:5672"
  # MySQL used only as Grafana's own database in this setup;
  # the OnCall engine itself uses the postgres service above.
  mysql-to-create-grafana-db:
    image: mysql:5.7
    platform: linux/x86_64
    mem_limit: 500m
    cpus: 0.5
    command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
    restart: always
    ports:
      - "3306:3306"
    environment:
      MYSQL_ROOT_PASSWORD: empty
      MYSQL_DATABASE: grafana
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
      timeout: 20s
      retries: 10
  grafana:
    image: "grafana/grafana:9.0.0-beta3"
    restart: always
    mem_limit: 500m
    cpus: 0.5
    environment:
      GF_DATABASE_TYPE: mysql
      # FIX: must match the MySQL service name above — there is no service
      # named "mysql" in this file, so the hostname "mysql" would never
      # resolve on the compose network and Grafana could not reach its DB.
      GF_DATABASE_HOST: mysql-to-create-grafana-db
      GF_DATABASE_USER: root
      GF_DATABASE_PASSWORD: empty
      GF_SECURITY_ADMIN_USER: oncall
      GF_SECURITY_ADMIN_PASSWORD: oncall
      GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: grafana-oncall-app
    volumes:
      - ./grafana-plugin:/var/lib/grafana/plugins/grafana-plugin
    ports:
      - "3000:3000"
    depends_on:
      mysql-to-create-grafana-db:
        condition: service_healthy

View file

@ -65,5 +65,5 @@ services:
ports:
- 3000:3000
depends_on:
mysql:
mysql-to-create-grafana-db:
condition: service_healthy

View file

@ -83,7 +83,7 @@ For more information on Escalation Chains and more ways to customize them, refer
In order for Grafana OnCall to notify you of an alert, you must configure how you want to be notified. Personal notification policies, chatops integrations, and on-call schedules allow you to automate how users are notified of alerts.
### Configure personal notification policies
Personal notification policies determine how a user is notified for a certain type of alert. Administrators can configure how users receive notification for certain types of alerts. For more information on personal notification policies, refer to [Manage users and teams for Grafana OnCall]({{< relref "configure-user-settings/" >}})
Personal notification policies determine how a user is notified for a certain type of alert. Get notified by SMS, phone call, or Slack mentions. Administrators can configure how users receive notification for certain types of alerts. For more information on personal notification policies, refer to [Manage users and teams for Grafana OnCall]({{< relref "configure-user-settings/" >}})
To configure a user's personal notification policies:

View file

@ -81,7 +81,10 @@ class PluginAuthentication(BaseAuthentication):
@staticmethod
def _get_user(request: Request, organization: Organization) -> User:
context = json.loads(request.headers.get("X-Grafana-Context"))
user_id = context["UserId"]
try:
user_id = context["UserId"]
except KeyError:
user_id = context["UserID"]
try:
return organization.users.get(user_id=user_id)
except User.DoesNotExist:

View file

@ -276,14 +276,18 @@ class OnCallSchedule(PolymorphicModel):
if not events:
return []
# sort schedule events by (type desc, priority desc, start timestamp asc)
events.sort(
key=lambda e: (
-e["calendar_type"] if e["calendar_type"] else 0, # overrides: 1, shifts: 0, gaps: None
-e["priority_level"] if e["priority_level"] else 0,
e["start"],
def apply_sorting(eventlist):
"""Sort events keeping the events priority criteria."""
eventlist.sort(
key=lambda e: (
-e["calendar_type"] if e["calendar_type"] else 0, # overrides: 1, shifts: 0, gaps: None
-e["priority_level"] if e["priority_level"] else 0,
e["start"],
)
)
)
# sort schedule events by (type desc, priority desc, start timestamp asc)
apply_sorting(events)
def _merge_intervals(evs):
"""Keep track of scheduled intervals."""
@ -345,6 +349,9 @@ class OnCallSchedule(PolymorphicModel):
# event ends after current interval, update event start timestamp to match the interval end
# and process the updated event as any other event
ev["start"] = intervals[current_interval_idx][1]
# reorder pending events after updating current event start date
# (ie. insert the event where it should be to keep the order criteria)
apply_sorting(pending)
else:
# done, go to next event
current_event_idx += 1

View file

@ -117,7 +117,7 @@ def test_filter_events_include_gaps(make_organization, make_user_for_organizatio
data = {
"start": start_date + timezone.timedelta(hours=10),
"rotation_start": start_date + timezone.timedelta(days=1, hours=10),
"rotation_start": start_date + timezone.timedelta(hours=10),
"duration": timezone.timedelta(hours=8),
"priority_level": 1,
"frequency": CustomOnCallShift.FREQUENCY_DAILY,
@ -192,7 +192,7 @@ def test_filter_events_include_empty(make_organization, make_user_for_organizati
data = {
"start": start_date + timezone.timedelta(hours=10),
"rotation_start": start_date + timezone.timedelta(days=1, hours=10),
"rotation_start": start_date + timezone.timedelta(hours=10),
"duration": timezone.timedelta(hours=8),
"priority_level": 1,
"frequency": CustomOnCallShift.FREQUENCY_DAILY,
@ -322,6 +322,73 @@ def test_final_schedule_events(make_organization, make_user_for_organization, ma
assert returned_events == expected_events
@pytest.mark.django_db
def test_final_schedule_splitting_events(
    make_organization, make_user_for_organization, make_on_call_shift, make_schedule
):
    """Final schedule splits lower-priority events around a higher-priority shift.

    Three overlapping daily rotations are created: A (priority 1, 10-20h),
    B (priority 1, 12-16h) and C (priority 2, 15-18h). The expected final
    schedule shows A and B truncated at 15h when C starts, and the remainder
    of A (18-20h) resuming after C ends.
    """
    organization = make_organization()
    schedule = make_schedule(
        organization,
        schedule_class=OnCallScheduleWeb,
        name="test_web_schedule",
    )
    # anchor rotations a week in the past so events exist for the queried day
    now = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
    start_date = now - timezone.timedelta(days=7)
    user_a, user_b, user_c = (make_user_for_organization(organization, username=i) for i in "ABC")
    shifts = (
        # user, priority, start time (h), duration (hs)
        (user_a, 1, 10, 10),  # r1-1: 10-20 / A
        (user_b, 1, 12, 4),  # r1-2: 12-16 / B
        (user_c, 2, 15, 3),  # r2-1: 15-18 / C
    )
    for user, priority, start_h, duration in shifts:
        data = {
            "start": start_date + timezone.timedelta(hours=start_h),
            "rotation_start": start_date + timezone.timedelta(hours=start_h),
            "duration": timezone.timedelta(hours=duration),
            "priority_level": priority,
            "frequency": CustomOnCallShift.FREQUENCY_DAILY,
            "schedule": schedule,
        }
        on_call_shift = make_on_call_shift(
            organization=organization, shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, **data
        )
        on_call_shift.add_rolling_users([[user]])
    returned_events = schedule.final_events("UTC", start_date, days=1)
    expected = (
        # start (h), duration (H), user, priority
        (10, 5, "A", 1),  # 10-15 A
        (12, 3, "B", 1),  # 12-15 B
        (15, 3, "C", 2),  # 15-18 C
        (18, 2, "A", 1),  # 18-20 A
    )
    expected_events = [
        {
            "end": start_date + timezone.timedelta(hours=start + duration),
            "priority_level": priority,
            "start": start_date + timezone.timedelta(hours=start),
            "user": user,
        }
        for start, duration, user, priority in expected
    ]
    # project returned events onto the comparable fields, dropping gap entries
    returned_events = [
        {
            "end": e["end"],
            "priority_level": e["priority_level"],
            "start": e["start"],
            "user": e["users"][0]["display_name"] if e["users"] else None,
        }
        for e in returned_events
        if not e["is_gap"]
    ]
    assert returned_events == expected_events
@pytest.mark.django_db
def test_preview_shift(make_organization, make_user_for_organization, make_schedule, make_on_call_shift):
organization = make_organization()

View file

@ -1,4 +1,4 @@
django==3.2.14
django==3.2.15
djangorestframework==3.12.4
slackclient==1.3.0
whitenoise==5.3.0
@ -40,6 +40,7 @@ https://github.com/iskhakov/django-push-notifications/archive/refs/tags/3.0.0-fi
django-mirage-field==1.3.0
django-mysql==4.6.0
PyMySQL==1.0.2
psycopg2-binary==2.9.3
emoji==1.7.0
apns2==0.7.2

View file

@ -81,6 +81,24 @@ GRAFANA_CLOUD_ONCALL_TOKEN = os.environ.get("GRAFANA_CLOUD_ONCALL_TOKEN", None)
# Outgoing webhook settings
DANGEROUS_WEBHOOKS_ENABLED = getenv_boolean("DANGEROUS_WEBHOOKS_ENABLED", default=False)
# DB backend defaults
DB_BACKEND = os.environ.get("DB_BACKEND", "mysql")
DB_BACKEND_DEFAULT_VALUES = {
"mysql": {
"USER": "root",
"PORT": "3306",
"OPTIONS": {
"charset": "utf8mb4",
"connect_timeout": 1,
},
},
"postgresql": {
"USER": "postgres",
"PORT": "5432",
"OPTIONS": {},
},
}
# Application definition
INSTALLED_APPS = [
@ -238,7 +256,7 @@ USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_URL = os.environ.get("STATIC_URL", "/static/")
STATIC_ROOT = "./static/"
CELERY_BROKER_URL = "amqp://rabbitmq:rabbitmq@localhost:5672"

View file

@ -1,8 +1,5 @@
# flake8: noqa: F405
# Workaround to use pymysql instead of mysqlclient
import pymysql
from .base import * # noqa
SECRET_KEY = "u5/IIbuiJR3Y9FQMBActk+btReZ5oOxu+l8MIJQWLfVzESoan5REE6UNSYYEQdjBOcty9CDak2X"
@ -14,18 +11,23 @@ BASE_URL = "http://localhost"
CELERY_BROKER_URL = "amqp://rabbitmq:rabbitmq@rabbit_test:5672"
pymysql.install_as_MySQLdb()
if DB_BACKEND == "mysql":
# Workaround to use pymysql instead of mysqlclient
import pymysql
pymysql.install_as_MySQLdb()
DB_BACKEND_DEFAULT_VALUES[DB_BACKEND]["OPTIONS"] = {"charset": "utf8mb4"}
# Primary database must have the name "default"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": "oncall_local_dev",
"USER": "root",
"ENGINE": "django.db.backends.{}".format(DB_BACKEND),
"NAME": os.environ.get("DB_NAME", "oncall_local_dev"),
"USER": os.environ.get("DB_USER", DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("USER", "root")),
"PASSWORD": "local_dev_pwd",
"HOST": "mysql_test",
"PORT": "3306",
"OPTIONS": {"charset": "utf8mb4"},
"HOST": "{}_test".format(DB_BACKEND),
"PORT": os.environ.get("DB_PORT", DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("PORT", "3306")),
"OPTIONS": DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("OPTIONS", {}),
},
}

View file

@ -1,11 +1,26 @@
import os
import sys
# Workaround to use pymysql instead of mysqlclient
import pymysql
from .base import * # noqa
if DB_BACKEND == "mysql": # noqa
# Workaround to use pymysql instead of mysqlclient
import pymysql
pymysql.install_as_MySQLdb()
DATABASES = {
"default": {
"ENGINE": "django.db.backends.{}".format(DB_BACKEND), # noqa
"NAME": os.environ.get("DB_NAME", "oncall_local_dev"),
"USER": os.environ.get("DB_USER", DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("USER", "root")), # noqa
"PASSWORD": os.environ.get("DB_PASSWORD", "empty"),
"HOST": os.environ.get("DB_HOST", "127.0.0.1"),
"PORT": os.environ.get("DB_PORT", DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("PORT", "3306")), # noqa
"OPTIONS": DB_BACKEND_DEFAULT_VALUES.get(DB_BACKEND, {}).get("OPTIONS", {}), # noqa
},
}
SECRET_KEY = os.environ.get("SECRET_KEY", "osMsNM0PqlRHBlUvqmeJ7+ldU3IUETCrY9TrmiViaSmInBHolr1WUlS0OFS4AHrnnkp1vp9S9z1")
MIRAGE_SECRET_KEY = os.environ.get(
@ -13,26 +28,8 @@ MIRAGE_SECRET_KEY = os.environ.get(
)
MIRAGE_CIPHER_IV = os.environ.get("MIRAGE_CIPHER_IV", "tZZa+60zTZO2NRcS")
pymysql.install_as_MySQLdb()
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": os.environ.get("MYSQL_DB_NAME", "oncall_local_dev"),
"USER": os.environ.get("MYSQL_USER", "root"),
"PASSWORD": os.environ.get("MYSQL_PASSWORD"),
"HOST": os.environ.get("MYSQL_HOST", "127.0.0.1"),
"PORT": os.environ.get("MYSQL_PORT", "3306"),
"OPTIONS": {
"charset": "utf8mb4",
"connect_timeout": 1,
},
},
}
TESTING = "pytest" in sys.modules or "unittest" in sys.modules
CACHES = {
"default": {
"BACKEND": "redis_cache.RedisCache",

View file

@ -53,7 +53,6 @@ STATICFILES_DIRS = [
"/etc/app/static",
]
STATIC_ROOT = "./collected_static/"
STATIC_URL = "/static/"
DEBUG = False

View file

@ -0,0 +1,42 @@
terraform {
required_providers {
grafana = {
source = "grafana/grafana"
version = ">= 1.22.0"
}
}
}
// Grafana OnCall provider configuration for this example.
// FIX: the token placeholder must be a quoted string — a bare
// <YOUR_API_TOKEN> is not valid HCL, so the example fails to parse as written.
provider "grafana" {
  alias               = "oncall"
  oncall_access_token = "<YOUR_API_TOKEN>"
}
data "grafana_oncall_user" "ikonstantinov" {
provider = grafana.oncall
username = "ikonstantinov"
}
resource "grafana_oncall_integration" "prod_alertmanager" {
provider = grafana.oncall
name = "Prod AM"
type = "alertmanager"
default_route {
escalation_chain_id = grafana_oncall_escalation_chain.default.id
}
}
resource "grafana_oncall_escalation_chain" "default" {
provider = grafana.oncall
name = "default"
}
resource "grafana_oncall_escalation" "notify_me_step" {
provider = grafana.oncall
escalation_chain_id = grafana_oncall_escalation_chain.default.id
type = "notify_persons"
persons_to_notify = [
data.grafana_oncall_user.ikonstantinov.id
]
position = 0
}

View file

@ -0,0 +1,106 @@
terraform {
required_providers {
grafana = {
source = "grafana/grafana"
version = ">= 1.22.0"
}
}
}
// Grafana OnCall provider configuration for this example.
// FIX: the token placeholder must be a quoted string — a bare
// <YOUR_API_TOKEN> is not valid HCL, so the example fails to parse as written.
provider "grafana" {
  alias               = "oncall"
  oncall_access_token = "<YOUR_API_TOKEN>"
}
// Users
data "grafana_oncall_user" "ikonstantinov" {
provider = grafana.oncall
username = "ikonstantinov"
}
data "grafana_oncall_user" "mkukuy" {
provider = grafana.oncall
username = "mkukuy"
}
// Schedule
resource "grafana_oncall_schedule" "primary" {
provider = grafana.oncall
name = "Primary"
type = "calendar"
time_zone = "UTC"
shifts = [
grafana_oncall_on_call_shift.week_shift.id
]
}
resource "grafana_oncall_on_call_shift" "week_shift" {
provider = grafana.oncall
name = "Week shift"
type = "rolling_users"
start = "2022-06-01T00:00:00"
duration = 60 * 60 * 24 // 24 hours
frequency = "weekly"
by_day = ["MO", "TU", "WE", "TH", "FR", "SA", "SU"]
week_start = "MO"
rolling_users = [
[data.grafana_oncall_user.ikonstantinov.id],
[data.grafana_oncall_user.mkukuy.id]
]
time_zone = "UTC"
}
// Prod Alertmanager Integration
resource "grafana_oncall_integration" "prod_alertmanager" {
provider = grafana.oncall
name = "Prod AM"
type = "alertmanager"
default_route {
escalation_chain_id = grafana_oncall_escalation_chain.default.id
}
}
// Routes
resource "grafana_oncall_route" "critical_route" {
provider = grafana.oncall
integration_id = grafana_oncall_integration.prod_alertmanager.id
escalation_chain_id = grafana_oncall_escalation_chain.critical.id
routing_regex = "\"severity\": \"critical\""
position = 0
}
// Default escalation chain
resource "grafana_oncall_escalation_chain" "default" {
provider = grafana.oncall
name = "default"
}
resource "grafana_oncall_escalation" "wait" {
provider = grafana.oncall
escalation_chain_id = grafana_oncall_escalation_chain.default.id
type = "wait"
duration = 60 * 5
position = 0
}
resource "grafana_oncall_escalation" "notify_schedule" {
provider = grafana.oncall
escalation_chain_id = grafana_oncall_escalation_chain.default.id
type = "notify_on_call_from_schedule"
notify_on_call_from_schedule = grafana_oncall_schedule.primary.id
position = 1
}
// Critical escalation chain
resource "grafana_oncall_escalation_chain" "critical" {
provider = grafana.oncall
name = "critical"
}
resource "grafana_oncall_escalation" "notify_schedule_critical" {
provider = grafana.oncall
escalation_chain_id = grafana_oncall_escalation_chain.critical.id
type = "notify_on_call_from_schedule"
notify_on_call_from_schedule = grafana_oncall_schedule.primary.id
position = 0
}

View file

@ -0,0 +1,75 @@
terraform {
required_providers {
grafana = {
source = "grafana/grafana"
version = ">= 1.22.0"
}
}
}
// Grafana OnCall provider configuration for this example.
// FIX: the token placeholder must be a quoted string — a bare
// <YOUR_API_TOKEN> is not valid HCL, so the example fails to parse as written.
provider "grafana" {
  alias               = "oncall"
  oncall_access_token = "<YOUR_API_TOKEN>"
}
// Users
data "grafana_oncall_user" "ikonstantinov" {
provider = grafana.oncall
username = "ikonstantinov"
}
data "grafana_oncall_user" "mkukuy" {
provider = grafana.oncall
username = "mkukuy"
}
// Schedule
resource "grafana_oncall_schedule" "primary" {
provider = grafana.oncall
name = "Primary"
type = "calendar"
time_zone = "UTC"
shifts = [
grafana_oncall_on_call_shift.week_shift.id
]
}
resource "grafana_oncall_on_call_shift" "week_shift" {
provider = grafana.oncall
name = "Week shift"
type = "rolling_users"
start = "2022-06-01T00:00:00"
duration = 60 * 60 * 24 // 24 hours
frequency = "weekly"
by_day = ["MO", "TU", "WE", "TH", "FR", "SA", "SU"]
week_start = "MO"
rolling_users = [
[data.grafana_oncall_user.ikonstantinov.id],
[data.grafana_oncall_user.mkukuy.id]
]
time_zone = "UTC"
}
// Prod Alertmanager Integration
resource "grafana_oncall_integration" "prod_alertmanager" {
provider = grafana.oncall
name = "Prod AM"
type = "alertmanager"
default_route {
escalation_chain_id = grafana_oncall_escalation_chain.default.id
}
}
// Default escalation chain
resource "grafana_oncall_escalation_chain" "default" {
provider = grafana.oncall
name = "default"
}
resource "grafana_oncall_escalation" "notify_schedule" {
provider = grafana.oncall
escalation_chain_id = grafana_oncall_escalation_chain.default.id
type = "notify_on_call_from_schedule"
notify_on_call_from_schedule = grafana_oncall_schedule.primary.id
position = 0
}

View file

@ -1,9 +1,65 @@
# Change Log
## v1.0.22 (2022-08-16)
- Make STATIC_URL configurable from environment variable
## v1.0.21 (2022-08-12)
- Bug fixes
## v1.0.19 (2022-08-10)
- Bug fixes
## v1.0.15 (2022-08-03)
- Bug fixes
## v1.0.13 (2022-07-27)
- Optimize alert group list view
- Fix a bug related to Twilio setup
## v1.0.12 (2022-07-26)
- Update push-notifications dependency
- Rework how absolute URLs are built
- Fix to show maintenance windows per team
- Logging improvements
- Internal api to get a schedule final events
## v1.0.10 (2022-07-22)
- Speed-up of alert group web caching
- Internal api for OnCall shifts
## v1.0.9 (2022-07-21)
- Frontend bug fixes & improvements
- Support regex_replace() in templates
- Bring back alert group caching and list view
## v1.0.7 (2022-07-18)
- Backend & frontend bug fixes
- Deployment improvements
- Reshape webhook payload for outgoing webhooks
- Add escalation chain usage info on escalation chains page
- Improve alert group list load speeds and simplify caching system
## v1.0.6 (2022-07-12)
- Manual Incidents enabled for teams
- Fix phone notifications for OSS
- Public API improvements
## v1.0.5 (2022-07-06)
- Bump Django to 3.2.14
- Fix PagerDuty iCal parsing
## 1.0.4 (2022-06-28)
- Allow Telegram DMs without channel connection.
## 1.0.3 (2022-06-27)
- Fix users public api endpoint. Now it returns users with all roles.
- Fix redundant notifications about gaps in schedules.
- Frontend fixes.
## 1.0.2 (2022-06-17)
- Fix Grafana Alerting integration to handle API changes in Grafana 9
- Improve public API endpoint for outgoing webhooks (/actions) by adding ability to create, update and delete
- Improve public api endpoint for outgoing webhooks (/actions) by adding ability to create, update and delete outgoing webhook instances
## 1.0.0 (2022-06-14)
@ -11,4 +67,4 @@
## 0.0.71 (2022-06-06)
- Initial Commit Release
- Initial Commit Release

View file

@ -15,7 +15,9 @@ export const fillGaps = (events: Event[]) => {
const nextEvent = events[i + 1];
if (nextEvent) {
newEvents.push({ start: event.end, end: nextEvent.start, is_gap: true });
if (nextEvent.start !== event.end) {
newEvents.push({ start: event.end, end: nextEvent.start, is_gap: true });
}
}
}

View file

@ -312,7 +312,7 @@ export class ScheduleStore extends BaseStore {
}
}
shifts.forEach((shift) => {
/*shifts.forEach((shift) => {
for (let i = 0; i < shift.events.length; i++) {
const iEvent = shift.events[i];
@ -325,7 +325,7 @@ export class ScheduleStore extends BaseStore {
}
shift.events = shift.events.filter((event) => !event.merged);
}
});
});*/
shifts.forEach((shift) => {
shift.events = fillGaps(shift.events);