Merge branch 'dev' into oncall-doc-edits

This commit is contained in:
Alyssa Wada 2022-06-21 09:37:15 -06:00 committed by GitHub
commit 7cb20f1ee4
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
37 changed files with 528 additions and 155 deletions

View file

@ -26,7 +26,18 @@ jobs:
- name: Lint All
run: |
pre-commit run --all-files
test-technical-documentation:
runs-on: ubuntu-latest
steps:
- name: "Check out code"
uses: "actions/checkout@v3"
- name: "Build website"
# -e HUGO_REFLINKSERRORLEVEL=ERROR prevents merging broken refs with the downside
# that no refs to external content can be used as these refs will not resolve in the
# docs-base image.
run: |
docker run -v ${PWD}/docs/sources:/hugo/content/docs/oncall/latest -e HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest /bin/bash -c 'make hugo'
unit-test-backend:
runs-on: ubuntu-latest

18
.github/workflows/helm_release.yml vendored Normal file
View file

@ -0,0 +1,18 @@
name: helm-release
on:
push:
branches:
- main
jobs:
call-update-helm-repo:
uses: grafana/helm-charts/.github/workflows/update-helm-repo.yaml@main
with:
charts_dir: helm
cr_configfile: helm/cr.yaml
ct_configfile: helm/ct.yaml
secrets:
helm_repo_token: ${{ secrets.GH_BOT_ACCESS_TOKEN }}
# See https://github.com/grafana/helm-charts/blob/main/INTERNAL.md about this key
gpg_key_base64: ${{ secrets.HELM_SIGN_KEY_BASE64 }}

View file

@ -72,3 +72,7 @@ jobs:
source_folder: "docs/sources"
# Append ".x" to target to produce a v<major>.<minor>.x directory.
target_folder: "content/docs/oncall/${{ steps.target.outputs.target }}.x"
# Allow the workflow to succeed if there are no changes to commit.
# This is only going to be true on tags as those events ignore the path
# filter in the workflow `on.push` section.
allow_no_changes: "true"

View file

@ -1,5 +1,14 @@
# Change Log
## 1.0.2 (2022-06-17)
- Fix Grafana Alerting integration to handle API changes in Grafana 9
- Improve public api endpoint for outgoing webhooks (/actions) by adding ability to create, update and delete outgoing webhook instance
## 1.0.0 (2022-06-14)
- First Public Release
## 0.0.71 (2022-06-06)
- Initial Release
- Initial Commit Release

View file

@ -1,5 +1,12 @@
<img width="400px" src="docs/img/logo.png">
[![Latest Release](https://img.shields.io/github/v/release/grafana/oncall?display_name=tag&sort=semver)](https://github.com/grafana/oncall/releases)
[![License](https://img.shields.io/github/license/grafana/oncall)](https://github.com/grafana/oncall/blob/dev/LICENSE)
[![Docker Pulls](https://img.shields.io/docker/pulls/grafana/oncall)](https://hub.docker.com/r/grafana/oncall/tags)
[![Slack](https://img.shields.io/badge/join%20slack-%23grafana-%2Doncall-brightgreen.svg)](https://slack.grafana.com/)
[![Discussion](https://img.shields.io/badge/discuss-oncall%20forum-orange.svg)](https://github.com/grafana/oncall/discussions)
[![Build Status](https://drone.grafana.net/api/badges/grafana/oncall/status.svg?ref=refs/heads/dev)](https://drone.grafana.net/grafana/oncall)
Developer-friendly incident response with brilliant Slack integration.
<img width="60%" src="screenshot.png">

View file

@ -1,6 +1,7 @@
services:
engine:
image: grafana/oncall
restart: always
ports:
- 8080:8080
command: >
@ -33,8 +34,8 @@ services:
condition: service_started
celery:
# TODO: change to the public image once it's public
image: grafana/oncall
restart: always
command: sh -c "./celery_with_exporter.sh"
environment:
BASE_URL: $DOMAIN
@ -100,8 +101,8 @@ services:
cpus: 0.5
command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
restart: always
ports:
- 3306:3306
expose:
- 3306
volumes:
- dbdata:/var/lib/mysql
environment:
@ -117,11 +118,12 @@ services:
mem_limit: 100m
cpus: 0.1
restart: always
ports:
- 6379:6379
expose:
- 6379
rabbitmq:
image: "rabbitmq:3.7.15-management"
restart: always
hostname: rabbitmq
mem_limit: 1000m
cpus: 0.5
@ -144,6 +146,7 @@ services:
grafana:
image: "grafana/grafana:9.0.0-beta3"
restart: always
mem_limit: 500m
ports:
- 3000:3000

View file

@ -1,6 +1,6 @@
# Grafana Cloud Documentation
Source for documentation at https://grafana.com/docs/amixr/
Source for documentation at https://grafana.com/docs/oncall/
## Preview the website

View file

@ -30,7 +30,6 @@ For detailed installation instructions and additional resources, refer to the OS
For more information on production environment installation, refer to the following OSS Grafana OnCall [production environment helm chart](https://github.com/grafana/oncall/helm)
## Configure Slack for Grafana OnCall OSS
The Slack integration for Grafana OnCall leverages Slack API features to provide a customizable and useful integration. Refer to the following steps to configure the Slack integration:

View file

@ -54,6 +54,31 @@ class GrafanaAlertingSyncManager:
)
return
def alerting_config_with_respect_to_grafana_version(
self, is_grafana_datasource, datasource_id, datasource_uid, client_method, *args
):
"""Quick fix for deprecated grafana alerting api endpoints"""
if is_grafana_datasource:
datasource_attr = GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT
config, response_info = client_method(datasource_attr, *args)
else:
# Get config by datasource id for Grafana version < 9
datasource_attr = datasource_id
config, response_info = client_method(datasource_attr, *args)
if response_info["status_code"] == status.HTTP_400_BAD_REQUEST:
# Get config by datasource uid for Grafana version >= 9
datasource_attr = datasource_uid
config, response_info = client_method(datasource_attr, *args)
if config is None:
logger.warning(
f"Got config None in alerting_config_with_respect_to_grafana_version with method "
f"{client_method.__name__} for is_grafana_datasource {is_grafana_datasource} for integration "
f"{self.alert_receive_channel.pk}; response: {response_info}"
)
return config, response_info
def create_contact_points(self) -> None:
"""
Get all alertmanager datasources and try to create contact points for them.
@ -84,6 +109,10 @@ class GrafanaAlertingSyncManager:
datasources_to_create.append(datasource)
if datasources_to_create:
logger.warning(
f"Some contact points were not created for integration {self.alert_receive_channel.pk}, "
f"trying to create async"
)
# create other contact points async
schedule_create_contact_points_for_datasource(self.alert_receive_channel.pk, datasources_to_create)
else:
@ -98,13 +127,14 @@ class GrafanaAlertingSyncManager:
if datasource is None:
datasource = {}
datasource_id_or_grafana = datasource.get("id") or GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT
datasource_type = datasource.get("type") or GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT
is_grafana_datasource = datasource.get("id") is None
logger.info(
f"Create contact point for {datasource_type} datasource, integration {self.alert_receive_channel.pk}"
)
config, response_info = self.client.get_alerting_config(datasource_id_or_grafana)
config, response_info = self.alerting_config_with_respect_to_grafana_version(
is_grafana_datasource, datasource.get("id"), datasource.get("uid"), self.client.get_alerting_config
)
if config is None:
logger.warning(
@ -116,7 +146,12 @@ class GrafanaAlertingSyncManager:
updated_config = copy.deepcopy(config)
if config["alertmanager_config"] is None:
default_config, response_info = self.client.get_alertmanager_status_with_config(datasource_id_or_grafana)
default_config, response_info = self.alerting_config_with_respect_to_grafana_version(
is_grafana_datasource,
datasource.get("id"),
datasource.get("uid"),
self.client.get_alertmanager_status_with_config,
)
if default_config is None:
logger.warning(
f"Failed to create contact point (alertmanager_config is None) for integration "
@ -144,7 +179,13 @@ class GrafanaAlertingSyncManager:
)
updated_config["alertmanager_config"]["receivers"] = receivers + [new_receiver]
response, response_info = self.client.update_alerting_config(updated_config, datasource_id_or_grafana)
response, response_info = self.alerting_config_with_respect_to_grafana_version(
is_grafana_datasource,
datasource.get("id"),
datasource.get("uid"),
self.client.update_alerting_config,
updated_config,
)
if response is None:
logger.warning(
f"Failed to create contact point for integration {self.alert_receive_channel.pk} (POST): {response_info}"
@ -153,7 +194,9 @@ class GrafanaAlertingSyncManager:
logger.warning(f"Config: {config}\nUpdated config: {updated_config}")
return
config, response_info = self.client.get_alerting_config(datasource_id_or_grafana)
config, response_info = self.alerting_config_with_respect_to_grafana_version(
is_grafana_datasource, datasource.get("id"), datasource.get("uid"), self.client.get_alerting_config
)
contact_point = self._create_contact_point_from_payload(config, receiver_name, datasource)
contact_point_created_text = "created" if contact_point else "not created, creation will be retried"
logger.info(
@ -232,6 +275,7 @@ class GrafanaAlertingSyncManager:
uid=receiver_config.get("uid"), # uid is None for non-Grafana datasource
datasource_name=datasource.get("name") or GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT,
datasource_id=datasource.get("id"), # id is None for Grafana datasource
datasource_uid=datasource.get("uid"), # uid is None for Grafana datasource
)
contact_point.save()
return contact_point
@ -268,14 +312,23 @@ class GrafanaAlertingSyncManager:
def sync_contact_point(self, contact_point) -> None:
"""Update name of contact point and related routes or delete it if integration was deleted"""
datasource_id = contact_point.datasource_id or GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT
datasource_type = "grafana" if not contact_point.datasource_id else "nongrafana"
datasource_type = (
GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT
if not (contact_point.datasource_id or contact_point.datasource_uid)
else "nongrafana"
)
is_grafana_datasource = datasource_type == GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT
logger.info(
f"Sync contact point for {datasource_type} (name: {contact_point.datasource_name}) datasource, integration "
f"{self.alert_receive_channel.pk}"
)
config, response_info = self.client.get_alerting_config(datasource_id)
config, response_info = self.alerting_config_with_respect_to_grafana_version(
is_grafana_datasource,
contact_point.datasource_id,
contact_point.datasource_uid,
self.client.get_alerting_config,
)
if config is None:
logger.warning(
f"Failed to update contact point (GET) for integration {self.alert_receive_channel.pk}: Is unified "
@ -286,7 +339,7 @@ class GrafanaAlertingSyncManager:
receivers = config["alertmanager_config"]["receivers"]
name_in_alerting = self.find_name_of_contact_point(
contact_point.uid,
datasource_id,
is_grafana_datasource,
receivers,
)
@ -300,8 +353,8 @@ class GrafanaAlertingSyncManager:
new_name,
)
contact_point.name = new_name
if datasource_id != GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT:
datasource_name = self.get_datasource_name(datasource_id)
if not is_grafana_datasource:
datasource_name = self.get_datasource_name(contact_point)
contact_point.datasource_name = datasource_name
contact_point.save(update_fields=["name", "datasource_name"])
# if integration was deleted, delete contact point and related routes
@ -310,8 +363,13 @@ class GrafanaAlertingSyncManager:
updated_config,
name_in_alerting,
)
response, response_info = self.client.update_alerting_config(updated_config, datasource_id)
response, response_info = self.alerting_config_with_respect_to_grafana_version(
is_grafana_datasource,
contact_point.datasource_id,
contact_point.datasource_uid,
self.client.update_alerting_config,
updated_config,
)
if response is None:
logger.warning(
f"Failed to update contact point for integration {self.alert_receive_channel.pk} "
@ -379,8 +437,8 @@ class GrafanaAlertingSyncManager:
return alerting_route
def find_name_of_contact_point(self, contact_point_uid, datasource_id, receivers) -> str:
if datasource_id == GrafanaAlertingSyncManager.GRAFANA_CONTACT_POINT:
def find_name_of_contact_point(self, contact_point_uid, is_grafana_datasource, receivers) -> str:
if is_grafana_datasource:
name_in_alerting = self._find_name_of_contact_point_by_uid(contact_point_uid, receivers)
else:
name_in_alerting = self._find_name_of_contact_point_by_integration_url(receivers)
@ -415,6 +473,11 @@ class GrafanaAlertingSyncManager:
break
return name_in_alerting
def get_datasource_name(self, datasource_id) -> str:
datasource, _ = self.client.get_datasource(datasource_id)
def get_datasource_name(self, contact_point) -> str:
datasource_id = contact_point.datasource_id
datasource_uid = contact_point.datasource_uid
datasource, response_info = self.client.get_datasource(datasource_uid)
if response_info["status_code"] != 200:
# For old Grafana versions (< 9) try to use deprecated endpoint
datasource, _ = self.client.get_datasource_by_id(datasource_id)
return datasource["name"]

View file

@ -0,0 +1,18 @@
# Generated by Django 3.2.13 on 2022-06-14 15:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alerts', '0002_squashed_initial'),
]
operations = [
migrations.AddField(
model_name='grafanaalertingcontactpoint',
name='datasource_uid',
field=models.CharField(default=None, max_length=100, null=True),
),
]

View file

@ -16,7 +16,8 @@ class GrafanaAlertingContactPoint(models.Model):
default=None,
related_name="contact_points",
)
uid = models.CharField(max_length=100, null=True, default=None) # uid is None for non-Grafana datasource
uid = models.CharField(max_length=100, null=True, default=None) # receiver uid is None for non-Grafana datasource
name = models.CharField(max_length=100)
datasource_name = models.CharField(max_length=100, default="grafana")
datasource_id = models.IntegerField(null=True, default=None) # id is None for Grafana datasource
datasource_uid = models.CharField(max_length=100, null=True, default=None) # uid is None for Grafana datasource

View file

@ -42,7 +42,14 @@ def create_contact_points_for_datasource(alert_receive_channel_id, datasource_li
AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel")
alert_receive_channel = AlertReceiveChannel.objects.get(pk=alert_receive_channel_id)
alert_receive_channel = AlertReceiveChannel.objects.filter(pk=alert_receive_channel_id).first()
if not alert_receive_channel:
logger.debug(
f"Cannot create contact point for integration {alert_receive_channel_id}: integration does not exist"
)
return
grafana_alerting_sync_manager = alert_receive_channel.grafana_alerting_sync_manager
client = GrafanaAPIClient(
api_url=alert_receive_channel.organization.grafana_url,
@ -52,11 +59,23 @@ def create_contact_points_for_datasource(alert_receive_channel_id, datasource_li
datasources_to_create = []
for datasource in datasource_list:
contact_point = None
config, response_info = client.get_alerting_config(datasource["id"])
is_grafana_datasource = not (datasource.get("id") or datasource.get("uid"))
config, response_info = grafana_alerting_sync_manager.alerting_config_with_respect_to_grafana_version(
is_grafana_datasource, datasource.get("id"), datasource.get("uid"), client.get_alerting_config
)
if config is None:
logger.debug(
f"Got config None for is_grafana_datasource {is_grafana_datasource} "
f"for integration {alert_receive_channel_id}; response: {response_info}"
)
if response_info.get("status_code") == status.HTTP_404_NOT_FOUND:
client.get_alertmanager_status_with_config(datasource["id"])
contact_point = alert_receive_channel.grafana_alerting_sync_manager.create_contact_point(datasource)
grafana_alerting_sync_manager.alerting_config_with_respect_to_grafana_version(
is_grafana_datasource,
datasource.get("id"),
datasource.get("uid"),
client.get_alertmanager_status_with_config,
)
contact_point = grafana_alerting_sync_manager.create_contact_point(datasource)
elif response_info.get("status_code") == status.HTTP_400_BAD_REQUEST:
logger.warning(
f"Failed to create contact point for integration {alert_receive_channel_id}, "
@ -64,9 +83,13 @@ def create_contact_points_for_datasource(alert_receive_channel_id, datasource_li
)
continue
else:
contact_point = alert_receive_channel.grafana_alerting_sync_manager.create_contact_point(datasource)
contact_point = grafana_alerting_sync_manager.create_contact_point(datasource)
if contact_point is None:
# Failed to create contact point duo to getting wrong alerting config.
logger.warning(
f"Failed to create contact point for integration {alert_receive_channel_id} due to getting wrong "
f"config, datasource info: {datasource}; response: {response_info}. Retrying"
)
# Failed to create contact point due to getting wrong alerting config.
# Add datasource to list and retry to create contact point for it again
datasources_to_create.append(datasource)

View file

@ -3,7 +3,6 @@ from datetime import timedelta
import humanize
import pytz
from django.apps import apps
from django.conf import settings
from django.utils import timezone
from rest_framework import fields, serializers
@ -121,7 +120,6 @@ class CurrentOrganizationSerializer(OrganizationSerializer):
return {
"telegram_configured": telegram_configured,
"twilio_configured": twilio_configured,
"extra_messaging_backends_enabled": settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED,
}
def get_stats(self, obj):

View file

@ -80,30 +80,6 @@ def test_current_team_update_permissions(
assert response.status_code == expected_status
@pytest.mark.django_db
@pytest.mark.parametrize("feature_flag_enabled", [False, True])
def test_current_team_messaging_backend_status(
settings,
make_organization,
make_user_for_organization,
make_token_for_organization,
make_user_auth_headers,
feature_flag_enabled,
):
org = make_organization()
tester = make_user_for_organization(org, role=Role.ADMIN)
_, token = make_token_for_organization(org)
client = APIClient()
settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = feature_flag_enabled
url = reverse("api-internal:api-current-team")
response = client.get(url, format="json", **make_user_auth_headers(tester, token))
assert response.status_code == status.HTTP_200_OK
assert response.json()["env_status"]["extra_messaging_backends_enabled"] == bool(feature_flag_enabled)
@pytest.mark.django_db
@pytest.mark.parametrize(
"role,expected_status",

View file

@ -450,22 +450,16 @@ def test_switch_wait_delay(
@pytest.mark.django_db
@pytest.mark.parametrize("feature_flag_enabled", [False, True])
def test_notification_policy_backends_enabled(
user_notification_policy_internal_api_setup, settings, make_user_auth_headers, feature_flag_enabled
user_notification_policy_internal_api_setup, settings, make_user_auth_headers
):
token, _, users = user_notification_policy_internal_api_setup
admin, _ = users
settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = feature_flag_enabled
client = APIClient()
url = reverse("api-internal:notification_policy-notify-by-options")
response = client.get(url, **make_user_auth_headers(admin, token))
assert response.status_code == status.HTTP_200_OK
options = [opt["display_name"] for opt in response.json()]
if feature_flag_enabled:
assert "Test Only Backend" in options
else:
assert "Test Only Backend" not in options
assert "Test Only Backend" in options

View file

@ -52,9 +52,6 @@ def load_backend(path):
def get_messaging_backends():
global _messaging_backends
if not settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED:
return {}
if _messaging_backends is None:
_messaging_backends = {}
for backend_path in settings.EXTRA_MESSAGING_BACKENDS:
@ -64,10 +61,7 @@ def get_messaging_backends():
def get_messaging_backend_from_id(backend_id):
backend = None
if settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED:
backend = _messaging_backends.get(backend_id)
return backend
return _messaging_backends.get(backend_id)
_messaging_backends = None

View file

@ -3,17 +3,7 @@ import pytest
from apps.base.messaging import get_messaging_backend_from_id, get_messaging_backends
@pytest.mark.django_db
def test_messaging_backends_disabled(settings):
settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = False
assert get_messaging_backends() == {}
assert get_messaging_backend_from_id("TESTONLY") is None
@pytest.mark.django_db
def test_messaging_backends_enabled(settings):
settings.FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = True
assert get_messaging_backends() != {}
assert get_messaging_backend_from_id("TESTONLY") is not None

View file

@ -103,16 +103,20 @@ class GrafanaAPIClient(APIClient):
def get_datasources(self):
return self.api_get("api/datasources")
def get_datasource(self, datasource_id):
def get_datasource_by_id(self, datasource_id):
# This endpoint is deprecated for Grafana version >= 9. Use get_datasource instead
return self.api_get(f"api/datasources/{datasource_id}")
def get_datasource(self, datasource_uid):
return self.api_get(f"api/datasources/uid/{datasource_uid}")
def get_alertmanager_status_with_config(self, recipient):
return self.api_get(f"api/alertmanager/{recipient}/api/v2/status")
def get_alerting_config(self, recipient):
return self.api_get(f"api/alertmanager/{recipient}/config/api/v1/alerts")
def update_alerting_config(self, config, recipient):
def update_alerting_config(self, recipient, config):
return self.api_post(f"api/alertmanager/{recipient}/config/api/v1/alerts", config)

View file

@ -1,17 +1,88 @@
import json
from django.core.validators import URLValidator, ValidationError
from jinja2 import Template, TemplateError
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from apps.alerts.models import CustomButton
from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField
from common.api_helpers.utils import CurrentOrganizationDefault
class ActionSerializer(serializers.ModelSerializer):
class ActionCreateSerializer(serializers.ModelSerializer):
id = serializers.CharField(read_only=True, source="public_primary_key")
team_id = TeamPrimaryKeyRelatedField(allow_null=True, source="team")
organization = serializers.HiddenField(default=CurrentOrganizationDefault())
team_id = TeamPrimaryKeyRelatedField(required=False, allow_null=True, source="team")
url = serializers.CharField(required=True, allow_null=False, allow_blank=False, source="webhook")
class Meta:
model = CustomButton
fields = [
"id",
"name",
"organization",
"team_id",
"url",
"data",
"user",
"password",
"authorization_header",
"forward_whole_payload",
]
extra_kwargs = {
"name": {"required": True, "allow_null": False, "allow_blank": False},
"data": {"required": False, "allow_null": True, "allow_blank": False},
"user": {"required": False, "allow_null": True, "allow_blank": False},
"password": {"required": False, "allow_null": True, "allow_blank": False},
"authorization_header": {"required": False, "allow_null": True, "allow_blank": False},
"forward_whole_payload": {"required": False, "allow_null": True},
}
validators = [UniqueTogetherValidator(queryset=CustomButton.objects.all(), fields=["name", "organization"])]
def validate_url(self, url):
if url:
try:
URLValidator()(url)
except ValidationError:
raise serializers.ValidationError("URL is incorrect")
return url
return None
def validate_data(self, data):
if not data:
return None
try:
json.loads(data)
except ValueError:
raise serializers.ValidationError("Data has incorrect format")
try:
Template(data)
except TemplateError:
raise serializers.ValidationError("Data has incorrect template")
return data
def validate_forward_whole_payload(self, data):
if data is None:
return False
return data
class ActionUpdateSerializer(ActionCreateSerializer):
team_id = TeamPrimaryKeyRelatedField(source="team", read_only=True)
url = serializers.CharField(required=False, allow_null=False, allow_blank=False, source="webhook")
class Meta(ActionCreateSerializer.Meta):
extra_kwargs = {
"name": {"required": False, "allow_null": False, "allow_blank": False},
"data": {"required": False, "allow_null": True, "allow_blank": False},
"user": {"required": False, "allow_null": True, "allow_blank": False},
"password": {"required": False, "allow_null": True, "allow_blank": False},
"authorization_header": {"required": False, "allow_null": True, "allow_blank": False},
"forward_whole_payload": {"required": False, "allow_null": True},
}

View file

@ -3,6 +3,8 @@ from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from apps.alerts.models import CustomButton
@pytest.mark.django_db
def test_get_custom_actions(
@ -28,6 +30,12 @@ def test_get_custom_actions(
"id": custom_action.public_primary_key,
"name": custom_action.name,
"team_id": None,
"url": custom_action.webhook,
"data": custom_action.data,
"user": custom_action.user,
"password": custom_action.password,
"authorization_header": custom_action.authorization_header,
"forward_whole_payload": custom_action.forward_whole_payload,
}
],
}
@ -60,6 +68,12 @@ def test_get_custom_actions_filter_by_name(
"id": custom_action.public_primary_key,
"name": custom_action.name,
"team_id": None,
"url": custom_action.webhook,
"data": custom_action.data,
"user": custom_action.user,
"password": custom_action.password,
"authorization_header": custom_action.authorization_header,
"forward_whole_payload": custom_action.forward_whole_payload,
}
],
}
@ -87,3 +101,171 @@ def test_get_custom_actions_filter_by_name_empty_result(
assert response.status_code == status.HTTP_200_OK
assert response.data == expected_payload
@pytest.mark.django_db
def test_get_custom_action(
make_organization_and_user_with_token,
make_custom_action,
):
organization, user, token = make_organization_and_user_with_token()
client = APIClient()
custom_action = make_custom_action(organization=organization)
url = reverse("api-public:actions-detail", kwargs={"pk": custom_action.public_primary_key})
response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
expected_payload = {
"id": custom_action.public_primary_key,
"name": custom_action.name,
"team_id": None,
"url": custom_action.webhook,
"data": custom_action.data,
"user": custom_action.user,
"password": custom_action.password,
"authorization_header": custom_action.authorization_header,
"forward_whole_payload": custom_action.forward_whole_payload,
}
assert response.status_code == status.HTTP_200_OK
assert response.data == expected_payload
@pytest.mark.django_db
def test_create_custom_action(make_organization_and_user_with_token):
organization, user, token = make_organization_and_user_with_token()
client = APIClient()
url = reverse("api-public:actions-list")
data = {
"name": "Test outgoing webhook",
"url": "https://example.com",
}
response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
custom_action = CustomButton.objects.get(public_primary_key=response.data["id"])
expected_result = {
"id": custom_action.public_primary_key,
"name": custom_action.name,
"team_id": None,
"url": custom_action.webhook,
"data": custom_action.data,
"user": custom_action.user,
"password": custom_action.password,
"authorization_header": custom_action.authorization_header,
"forward_whole_payload": custom_action.forward_whole_payload,
}
assert response.status_code == status.HTTP_201_CREATED
assert response.data == expected_result
@pytest.mark.django_db
def test_create_custom_action_invalid_data(
make_organization_and_user_with_token,
):
organization, user, token = make_organization_and_user_with_token()
client = APIClient()
url = reverse("api-public:actions-list")
data = {
"name": "Test outgoing webhook",
"url": "invalid_url",
}
response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data["url"][0] == "URL is incorrect"
data = {
"name": "Test outgoing webhook",
}
response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data["url"][0] == "This field is required."
data = {
"url": "https://example.com",
}
response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data["name"][0] == "This field is required."
@pytest.mark.django_db
def test_update_custom_action(
make_organization_and_user_with_token,
make_custom_action,
):
organization, user, token = make_organization_and_user_with_token()
client = APIClient()
custom_action = make_custom_action(organization=organization)
url = reverse("api-public:actions-detail", kwargs={"pk": custom_action.public_primary_key})
data = {
"name": "RENAMED",
}
assert custom_action.name != data["name"]
response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}")
expected_result = {
"id": custom_action.public_primary_key,
"name": data["name"],
"team_id": None,
"url": custom_action.webhook,
"data": custom_action.data,
"user": custom_action.user,
"password": custom_action.password,
"authorization_header": custom_action.authorization_header,
"forward_whole_payload": custom_action.forward_whole_payload,
}
assert response.status_code == status.HTTP_200_OK
custom_action.refresh_from_db()
assert custom_action.name == expected_result["name"]
assert response.data == expected_result
@pytest.mark.django_db
def test_delete_custom_action(
make_organization_and_user_with_token,
make_custom_action,
):
organization, user, token = make_organization_and_user_with_token()
client = APIClient()
custom_action = make_custom_action(organization=organization)
url = reverse("api-public:actions-detail", kwargs={"pk": custom_action.public_primary_key})
assert custom_action.deleted_at is None
response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}")
assert response.status_code == status.HTTP_204_NO_CONTENT
custom_action.refresh_from_db()
assert custom_action.deleted_at is not None
response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}")
assert response.status_code == status.HTTP_404_NOT_FOUND
assert response.data["detail"] == "Not found."

View file

@ -1,25 +1,26 @@
from django_filters import rest_framework as filters
from rest_framework import mixins
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import GenericViewSet
from rest_framework.viewsets import ModelViewSet
from apps.alerts.models import CustomButton
from apps.auth_token.auth import ApiTokenAuthentication
from apps.public_api.serializers.action import ActionSerializer
from apps.public_api.serializers.action import ActionCreateSerializer, ActionUpdateSerializer
from apps.public_api.throttlers.user_throttle import UserThrottle
from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log
from common.api_helpers.filters import ByTeamFilter
from common.api_helpers.mixins import RateLimitHeadersMixin
from common.api_helpers.mixins import PublicPrimaryKeyMixin, RateLimitHeadersMixin, UpdateSerializerMixin
from common.api_helpers.paginators import FiftyPageSizePaginator
class ActionView(RateLimitHeadersMixin, mixins.ListModelMixin, GenericViewSet):
class ActionView(RateLimitHeadersMixin, PublicPrimaryKeyMixin, UpdateSerializerMixin, ModelViewSet):
authentication_classes = (ApiTokenAuthentication,)
permission_classes = (IsAuthenticated,)
pagination_class = FiftyPageSizePaginator
throttle_classes = [UserThrottle]
model = CustomButton
serializer_class = ActionSerializer
serializer_class = ActionCreateSerializer
update_serializer_class = ActionUpdateSerializer
filter_backends = (filters.DjangoFilterBackend,)
filterset_class = ByTeamFilter
@ -32,3 +33,27 @@ class ActionView(RateLimitHeadersMixin, mixins.ListModelMixin, GenericViewSet):
queryset = queryset.filter(name=action_name)
return queryset
def perform_create(self, serializer):
serializer.save()
instance = serializer.instance
organization = self.request.auth.organization
user = self.request.user
description = f"Custom action {instance.name} was created"
create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_CREATED, description)
def perform_update(self, serializer):
organization = self.request.auth.organization
user = self.request.user
old_state = serializer.instance.repr_settings_for_client_side_logging
serializer.save()
new_state = serializer.instance.repr_settings_for_client_side_logging
description = f"Custom action {serializer.instance.name} was changed " f"from:\n{old_state}\nto:\n{new_state}"
create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_CHANGED, description)
def perform_destroy(self, instance):
organization = self.request.auth.organization
user = self.request.user
description = f"Custom action {instance.name} was deleted"
create_organization_log(organization, user, OrganizationLogType.TYPE_CUSTOM_ACTION_DELETED, description)
instance.delete()

View file

@ -96,7 +96,7 @@ telegram_message = """\
{%- if "status" in payload -%}
<b>Status</b>: {{ payload.status }}
{% endif -%}
<b>Labels:** {% for k, v in payload["labels"].items() %}
<b>Labels:</b> {% for k, v in payload["labels"].items() %}
{{ k }}: {{ v }}{% endfor %}
<b>Annotations:</b>
{%- for k, v in payload.get("annotations", {}).items() %}
@ -211,7 +211,7 @@ tests = {
"title": "KubeJobCompletion",
"message": (
"<b>Status</b>: firing\n"
"<b>Labels:** \n"
"<b>Labels:</b> \n"
"job: kube-state-metrics\n"
"instance: 10.143.139.7:8443\n"
"job_name: email-tracking-perform-initialization-1.0.50\n"

View file

@ -350,6 +350,8 @@ SOCIAL_AUTH_SLACK_LOGIN_KEY = SLACK_CLIENT_OAUTH_ID
SOCIAL_AUTH_SLACK_LOGIN_SECRET = SLACK_CLIENT_OAUTH_SECRET
SOCIAL_AUTH_SETTING_NAME_TO_LIVE_SETTING_NAME = {
"SOCIAL_AUTH_SLACK_LOGIN_KEY": "SLACK_CLIENT_OAUTH_ID",
"SOCIAL_AUTH_SLACK_LOGIN_SECRET": "SLACK_CLIENT_OAUTH_SECRET",
"SOCIAL_AUTH_SLACK_INSTALL_FREE_KEY": "SLACK_CLIENT_OAUTH_ID",
"SOCIAL_AUTH_SLACK_INSTALL_FREE_SECRET": "SLACK_CLIENT_OAUTH_SECRET",
}
@ -422,7 +424,6 @@ DATA_UPLOAD_MAX_MEMORY_SIZE = 5242880
# Log inbound/outbound calls as slow=1 if they exceed threshold
SLOW_THRESHOLD_SECONDS = 2.0
FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = getenv_boolean("FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED", default=False)
EXTRA_MESSAGING_BACKENDS = []
INSTALLED_ONCALL_INTEGRATIONS = [

View file

@ -37,5 +37,4 @@ SENDGRID_SECRET_KEY = "dummy_sendgrid_secret_key"
TWILIO_ACCOUNT_SID = "dummy_twilio_account_sid"
TWILIO_AUTH_TOKEN = "dummy_twilio_auth_token"
FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = True
EXTRA_MESSAGING_BACKENDS = ["apps.base.tests.messaging_backend.TestOnlyBackend"]

View file

@ -98,7 +98,6 @@ SWAGGER_SETTINGS = {
}
if TESTING:
FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = True
EXTRA_MESSAGING_BACKENDS = ["apps.base.tests.messaging_backend.TestOnlyBackend"]
TELEGRAM_TOKEN = "0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX"
TWILIO_AUTH_TOKEN = "twilio_auth_token"

View file

@ -1,5 +1,4 @@
import os
import sys
# Workaround to use pymysql instead of mysqlclient
import pymysql
@ -57,29 +56,3 @@ CACHES = {
APPEND_SLASH = False
SECURE_SSL_REDIRECT = False
TESTING = "pytest" in sys.modules or "unittest" in sys.modules
if TESTING:
TELEGRAM_TOKEN = "0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX"
TWILIO_AUTH_TOKEN = "twilio_auth_token"
# TODO: OSS: Add these settings to the OSS settings file. Add version there too.
OSS_INSTALLATION_FEATURES_ENABLED = True
INSTALLED_APPS += ["apps.oss_installation"] # noqa
CELERY_BEAT_SCHEDULE["send_usage_stats"] = { # noqa
"task": "apps.oss_installation.tasks.send_usage_stats_report",
"schedule": crontab(hour=0, minute=randrange(0, 59)), # Send stats report at a random minute past midnight # noqa
"args": (),
} # noqa
CELERY_BEAT_SCHEDULE["send_cloud_heartbeat"] = { # noqa
"task": "apps.oss_installation.tasks.send_cloud_heartbeat",
"schedule": crontab(minute="*/3"), # noqa
"args": (),
} # noqa
SEND_ANONYMOUS_USAGE_STATS = True

View file

@ -11,7 +11,6 @@ harakiri=620
max-requests=5000
vacuum=True
buffer-size=65535
listen=1024
http-auto-chunked=True
http-timeout=620
post-buffering=1

View file

@ -1,5 +1,14 @@
# Change Log
## 1.0.2 (2022-06-17)
- Fix Grafana Alerting integration to handle API changes in Grafana 9
- Improve public API endpoint for outgoing webhooks (/actions) by adding the ability to create, update and delete outgoing webhooks
## 1.0.0 (2022-06-14)
- First Public Release
## 0.0.71 (2022-06-06)
- Initial Release
- Initial Commit Release

View file

@ -68,6 +68,5 @@ export interface Team {
env_status: {
twilio_configured: boolean;
telegram_configured: boolean;
extra_messaging_backends_enabled: boolean;
};
}

5
helm/cr.yaml Normal file
View file

@ -0,0 +1,5 @@
git-repo: helm-charts
key: Grafana Loki
owner: grafana
sign: true
skip-existing: true

12
helm/ct.yaml Normal file
View file

@ -0,0 +1,12 @@
# See https://github.com/helm/chart-testing#configuration
remote: origin
target-branch: main
chart-dirs:
- helm
chart-repos:
- jetstack=https://charts.jetstack.io
- bitnami=https://charts.bitnami.com/bitnami
- grafana=https://grafana.github.io/helm-charts
- ingress-nginx=https://kubernetes.github.io/ingress-nginx
helm-extra-args: --timeout 600s
validate-maintainers: false

View file

@ -8,7 +8,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
version: 1.0.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View file

@ -21,7 +21,7 @@ helm install \
--wait \
--set base_url=example.com \
--set grafana."grafana\.ini".server.domain=example.com \
oncall \
release-oncall \
.
```
@ -36,7 +36,7 @@ helm upgrade \
--wait \
--set base_url=example.com \
--set grafana."grafana\.ini".server.domain=example.com \
oncall \
release-oncall \
.
```
@ -104,17 +104,17 @@ externalRabbitmq:
## Uninstall
### Uninstalling the helm chart
```bash
helm delete oncall
helm delete release-oncall
```
### Clean up PVC's
```bash
kubectl delete pvc data-oncall-mariadb-0 data-oncall-rabbitmq-0 \
redis-data-oncall-redis-master-0 redis-data-oncall-redis-replicas-0 \
redis-data-oncall-redis-replicas-1 redis-data-oncall-redis-replicas-2
kubectl delete pvc data-release-oncall-mariadb-0 data-release-oncall-rabbitmq-0 \
redis-data-release-oncall-redis-master-0 redis-data-release-oncall-redis-replicas-0 \
redis-data-release-oncall-redis-replicas-1 redis-data-release-oncall-redis-replicas-2
```
### Clean up secrets
```bash
kubectl delete secrets certificate-tls oncall-cert-manager-webhook-ca oncall-ingress-nginx-admission
kubectl delete secrets certificate-tls release-oncall-cert-manager-webhook-ca release-oncall-ingress-nginx-admission
```

View file

@ -19,6 +19,8 @@
value: "admin"
- name: OSS
value: "True"
- name: UWSGI_LISTEN
value: "1024"
{{- end }}
{{- define "snippet.celery.env" -}}

View file

@ -38,6 +38,6 @@ metadata:
name: {{ include "oncall.fullname" . }}-redis-external
type: Opaque
data:
rabbitmq-password: {{ required "externalRedis.password is required if not redis.enabled" .Values.externalRedis.password | b64enc | quote }}
redis-password: {{ required "externalRedis.password is required if not redis.enabled" .Values.externalRedis.password | b64enc | quote }}
{{- end }}

View file

@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "oncall.fullname" . }}-test-connection"
labels:
{{- include "oncall.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "oncall.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

View file

@ -55,7 +55,7 @@ ingress:
cert-manager.io/issuer: "letsencrypt-prod"
# Whether to install ingress controller
nginx-ingress:
ingress-nginx:
enabled: true
# Install cert-manager as a part of the release
@ -120,7 +120,7 @@ externalRabbitmq:
redis:
enabled: true
external_redis:
externalRedis:
host:
password: