Merge pull request #1513 from grafana/dev

v1.1.35
This commit is contained in:
Innokentii Konstantinov 2023-03-09 16:42:42 +08:00 committed by GitHub
commit 4778d2b504
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
22 changed files with 351 additions and 19 deletions

View file

@ -5,6 +5,17 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## v1.1.35 (2023-03-09)
### Added
- Insight logs
### Fixed
- Fixed issue with Alert group involved users filter
- Fixed email sending failure due to newline in title
## v1.1.34 (2023-03-08)
### Added

View file

@ -66,6 +66,31 @@ send a demo alert.
For more information on Grafana OnCall integrations and further configuration guidance, refer to
[Grafana OnCall integrations]({{< relref "../integrations" >}})
### Learn Alert Flow
All Alerts in OnCall are grouped into Alert Groups ([read more about Grouping ID]({{< relref "../alert-behavior/alert-templates" >}})). An Alert Group can be in one of the following mutually
exclusive states:
- **Firing:** Once an Alert Group is registered, the Escalation Policy associated with it starts. The escalation policy keeps working while the Alert Group is in this state.
- **Acknowledged:** Ongoing Escalation Chain will be interrupted. Unacknowledge will move Alert Group to the "Firing" state and will re-launch Escalation Chain.
- **Silenced:** Similar to "Acknowledged" but designed to be temporary with a timeout. Once time is out, will re-launch Escalation Chain and move Alert Group
to the "Firing" state.
- **Resolved:** Similar to "Acknowledged".
Possible transitions:
- Firing -> Acknowledged
- Firing -> Silenced
- Firing -> Resolved
- Silenced -> Firing
- Silenced -> Acknowledged
- Silenced -> Resolved
- Acknowledged -> Firing
- Acknowledged -> Resolved
- Resolved -> Firing
State transitions trigger an Escalation Chain launch with a few-second delay to avoid unexpected notifications.
### Configure Escalation Chains
Escalation Chains are customizable automated alert routing steps that enable you to specify who is notified for a certain

View file

@ -179,6 +179,7 @@ class ChannelFilter(OrderedModel):
@property
def insight_logs_serialized(self):
result = {
"filtering_term_type": self.get_filtering_term_type_display(),
"filtering_term": self.str_for_clients,
"order": self.order,
"slack_notification_enabled": self.notify_in_slack,

View file

@ -10,6 +10,7 @@ from rest_framework.test import APIClient
from apps.alerts.models import AlertGroup, AlertGroupLogRecord, AlertReceiveChannel
from apps.api.permissions import LegacyAccessControlRole
from apps.base.models import UserNotificationPolicyLogRecord
alert_raw_request_data = {
"evalMatches": [
@ -587,6 +588,119 @@ def test_get_filter_invitees_are_ag_with_multiple_logs(
assert len(first_response.data["results"]) == 1
@pytest.mark.django_db
def test_get_filter_mine(
    make_organization_and_user_with_plugin_token,
    make_user_for_organization,
    make_alert_receive_channel,
    make_channel_filter,
    make_alert_group,
    make_alert,
    make_user_auth_headers,
):
    """?mine=true should return only alert groups the requesting user is involved in;
    ?mine=false should return all alert groups."""
    client = APIClient()
    organization, first_user, token = make_organization_and_user_with_plugin_token()
    alert_receive_channel = make_alert_receive_channel(organization)
    default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
    # Alert group the requesting user interacted with (acknowledged by first_user).
    acknowledged_alert_group = make_alert_group(
        alert_receive_channel,
        channel_filter=default_channel_filter,
        acknowledged_at=timezone.now() + datetime.timedelta(hours=1),
        resolved_at=timezone.now() + datetime.timedelta(hours=2),
        acknowledged=True,
        acknowledged_by_user=first_user,
    )
    make_alert(alert_group=acknowledged_alert_group, raw_request_data=alert_raw_request_data)
    # other alert group, with no involvement from first_user
    other_alert_group = make_alert_group(
        alert_receive_channel,
        channel_filter=default_channel_filter,
    )
    # Fixed: attach the alert to the other group — previously this duplicated the
    # first group's alert and left `other_alert_group` without any alert.
    make_alert(alert_group=other_alert_group, raw_request_data=alert_raw_request_data)
    url = reverse("api-internal:alertgroup-list")
    first_response = client.get(
        url + "?mine=true",  # plain string: no placeholders, so no f-prefix needed
        format="json",
        **make_user_auth_headers(first_user, token),
    )
    assert first_response.status_code == status.HTTP_200_OK
    assert len(first_response.data["results"]) == 1
    second_response = client.get(
        url + "?mine=false",
        format="json",
        **make_user_auth_headers(first_user, token),
    )
    assert second_response.status_code == status.HTTP_200_OK
    assert len(second_response.data["results"]) == 2
@pytest.mark.django_db
def test_get_filter_involved_users(
    make_organization_and_user_with_plugin_token,
    make_user_for_organization,
    make_alert_receive_channel,
    make_channel_filter,
    make_alert_group,
    make_alert,
    make_user_auth_headers,
):
    """involved_users_are should match alert groups where a user either interacted
    (acknowledged/resolved/silenced) or was notified via a personal log record."""
    client = APIClient()
    organization, first_user, token = make_organization_and_user_with_plugin_token()
    second_user = make_user_for_organization(organization)
    alert_receive_channel = make_alert_receive_channel(organization)
    default_channel_filter = make_channel_filter(alert_receive_channel, is_default=True)
    # Alert group first_user interacted with (acknowledged it).
    acknowledged_alert_group = make_alert_group(
        alert_receive_channel,
        channel_filter=default_channel_filter,
        acknowledged_at=timezone.now() + datetime.timedelta(hours=1),
        resolved_at=timezone.now() + datetime.timedelta(hours=2),
        acknowledged=True,
        acknowledged_by_user=first_user,
    )
    make_alert(alert_group=acknowledged_alert_group, raw_request_data=alert_raw_request_data)
    # other alert group
    other_alert_group = make_alert_group(
        alert_receive_channel,
        channel_filter=default_channel_filter,
    )
    # Fixed: attach the alert to the other group — previously this duplicated the
    # first group's alert and left `other_alert_group` without any alert.
    make_alert(alert_group=other_alert_group, raw_request_data=alert_raw_request_data)
    # second user was notified
    other_alert_group.personal_log_records.create(
        type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED,
        author=second_user,
    )
    url = reverse("api-internal:alertgroup-list")
    first_response = client.get(
        url + f"?acknowledged_by={first_user.public_primary_key}",
        format="json",
        **make_user_auth_headers(first_user, token),
    )
    assert first_response.status_code == status.HTTP_200_OK
    assert len(first_response.data["results"]) == 1
    second_response = client.get(
        url
        + f"?involved_users_are={first_user.public_primary_key}&involved_users_are={second_user.public_primary_key}",
        format="json",
        **make_user_auth_headers(first_user, token),
    )
    assert second_response.status_code == status.HTTP_200_OK
    assert len(second_response.data["results"]) == 2
@pytest.mark.django_db
def test_get_filter_with_resolution_note(
alert_group_internal_api_setup,

View file

@ -151,15 +151,15 @@ class AlertGroupFilter(DateRangeFilterMixin, ModelFieldFilterMixin, filters.Filt
Q(personal_log_records__author__in=users)
|
# or interacted with the alert group
Q(acknowledged_by_user__pk__in=users)
| Q(resolved_by_user__pk__in=users)
| Q(silenced_by_user__pk__in=users)
Q(acknowledged_by_user__in=users)
| Q(resolved_by_user__in=users)
| Q(silenced_by_user__in=users)
).distinct()
return queryset
def filter_mine(self, queryset, name, value):
if value:
return self.filter_by_involved_users(queryset, "users", [self.request.user.pk])
return self.filter_by_involved_users(queryset, "users", [self.request.user])
return queryset
def filter_with_resolution_note(self, queryset, name, value):

View file

@ -50,6 +50,6 @@ def build_subject_and_message(alert_group, emails_left):
)
title = str_or_backup(templated_alert.title, title_fallback)
subject = f"[{title}] You are invited to check an alert group"
subject = f"[{title}] You are invited to check an alert group".replace("\n", "")
return subject, content

View file

@ -6,6 +6,7 @@ from django.core import mail
from django.core.mail.backends.locmem import EmailBackend
from apps.base.models import UserNotificationPolicy, UserNotificationPolicyLogRecord
from apps.email.alert_rendering import build_subject_and_message
from apps.email.tasks import get_from_email, notify_user_async
from apps.user_management.subscription_strategy.free_public_beta_subscription_strategy import (
FreePublicBetaSubscriptionStrategy,
@ -190,3 +191,21 @@ def test_get_from_email(
user = make_user_for_organization(organization)
assert get_from_email(user) == expected
@pytest.mark.django_db
def test_subject_newlines_removed(
    make_organization,
    make_alert_receive_channel,
    make_alert_group,
    make_alert,
):
    """Newline characters in the templated title must not leak into the email subject."""
    org = make_organization()
    # Template the email title so it contains an embedded newline.
    channel = make_alert_receive_channel(
        org, messaging_backends_templates={"EMAIL": {"title": "test\nnewlines"}}
    )
    group = make_alert_group(channel)
    make_alert(group, raw_request_data={})

    subject, _ = build_subject_and_message(group, 1)

    assert subject == "[testnewlines] You are invited to check an alert group"

View file

@ -46,6 +46,7 @@ class GCOMInstanceInfo(TypedDict):
orgName: str
url: str
status: str
clusterSlug: str
config: Optional[GCOMInstanceInfoConfig]

View file

@ -58,6 +58,7 @@ def check_gcom_permission(token_string: str, context) -> Optional["GcomToken"]:
org_slug=instance_info["orgSlug"],
org_title=instance_info["orgName"],
region_slug=instance_info["regionSlug"],
cluster_slug=instance_info["clusterSlug"],
gcom_token=token_string,
gcom_token_org_last_time_synced=timezone.now(),
)
@ -67,6 +68,7 @@ def check_gcom_permission(token_string: str, context) -> Optional["GcomToken"]:
organization.org_title = instance_info["orgName"]
organization.region_slug = instance_info["regionSlug"]
organization.grafana_url = instance_info["url"]
organization.cluster_slug = instance_info["clusterSlug"]
organization.gcom_token = token_string
organization.gcom_token_org_last_time_synced = timezone.now()
organization.save(
@ -78,6 +80,7 @@ def check_gcom_permission(token_string: str, context) -> Optional["GcomToken"]:
"grafana_url",
"gcom_token",
"gcom_token_org_last_time_synced",
"cluster_slug",
]
)
logger.debug(f"Finish authenticate by making request to gcom api for org={org_id}, stack_id={stack_id}")

View file

@ -18,6 +18,7 @@ STACK_SLUG = "asdfasdf"
ORG_SLUG = "hellooo"
ORG_TITLE = "nmvcnmvnmvc"
REGION_SLUG = "nmcvnmcvnmcvnmcv"
CLUSTER_SLUG = "nmcvnmcvnmcvnmcvnmcv"
SELF_HOSTED_SETTINGS = {
"GRAFANA_API_URL": GRAFANA_API_URL,
"STACK_ID": STACK_ID,
@ -27,6 +28,7 @@ SELF_HOSTED_SETTINGS = {
"ORG_SLUG": ORG_SLUG,
"ORG_TITLE": ORG_TITLE,
"REGION_SLUG": REGION_SLUG,
"CLUSTER_SLUG": CLUSTER_SLUG,
}
UNABLE_TO_FIND_GRAFANA_ERROR_MSG = f"Unable to connect to the specified Grafana API - {GRAFANA_API_URL}"

View file

@ -59,6 +59,7 @@ class SelfHostedInstallView(GrafanaHeadersMixin, APIView):
org_slug=settings.SELF_HOSTED_SETTINGS["ORG_SLUG"],
org_title=settings.SELF_HOSTED_SETTINGS["ORG_TITLE"],
region_slug=settings.SELF_HOSTED_SETTINGS["REGION_SLUG"],
cluster_slug=settings.SELF_HOSTED_SETTINGS["CLUSTER_SLUG"],
grafana_url=grafana_url,
api_token=grafana_api_token,
is_rbac_permissions_enabled=rbac_is_enabled,

View file

@ -0,0 +1,29 @@
from django.db import migrations, models
import django.db.models.deletion
def delete_user_duplicate_mobileappauthtokens(apps, _):
    """Keep only the newest MobileAppAuthToken per user, deleting older duplicates.

    Data cleanup for the AlterField below (user FK -> OneToOneField), so the
    implied unique constraint can be added without integrity errors.
    """
    MobileAppAuthToken = apps.get_model('mobile_app', 'MobileAppAuthToken')
    # start w/ the oldest mobile app auth tokens (ORDER BY id ASC)
    # and if we find any newer tokens, delete the earlier ones (ie. `row` variable)
    for row in MobileAppAuthToken.objects.all().order_by('id'):
        # Recounting on every iteration is intentional: once a user is down to a
        # single (newest) token, the remaining row is kept.
        if MobileAppAuthToken.objects.filter(user_id=row.user_id).count() > 1:
            row.delete()
class Migration(migrations.Migration):
    # Enforce one mobile app auth token per user: purge duplicate tokens first,
    # then tighten the user FK into a OneToOneField (adds a unique constraint).
    dependencies = [
        ('user_management', '0008_organization_is_grafana_incident_enabled'),
        ('mobile_app', '0001_initial'),
    ]
    operations = [
        # Data migration must run before AlterField so the unique constraint cannot fail.
        migrations.RunPython(delete_user_duplicate_mobileappauthtokens, migrations.RunPython.noop),
        migrations.AlterField(
            model_name='mobileappauthtoken',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='user_management.user'),
        ),
    ]

View file

@ -51,9 +51,7 @@ class MobileAppVerificationToken(BaseAuthToken):
class MobileAppAuthToken(BaseAuthToken):
user = models.ForeignKey(
to=User, null=False, blank=False, related_name="mobile_app_auth_tokens", on_delete=models.CASCADE
)
user = models.OneToOneField(to=User, null=False, blank=False, on_delete=models.CASCADE)
organization = models.ForeignKey(
to=Organization, null=False, blank=False, related_name="mobile_app_auth_tokens", on_delete=models.CASCADE
)

View file

@ -0,0 +1,77 @@
import pytest
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from apps.mobile_app.models import MobileAppAuthToken
@pytest.mark.django_db
def test_mobile_app_auth_token(
    make_organization_and_user_with_mobile_app_verification_token,
):
    """End-to-end lifecycle of the mobile app auth token endpoint:
    create, fetch, recreate (which replaces the old token), and delete.
    """
    organization, user, verification_token = make_organization_and_user_with_mobile_app_verification_token()
    client = APIClient()
    url = reverse("mobile_app:auth_token")
    # requests without the verification token are rejected
    response = client.post(url)
    assert response.status_code == status.HTTP_403_FORBIDDEN
    response = client.post(url, HTTP_AUTHORIZATION=verification_token)
    assert response.status_code == status.HTTP_201_CREATED
    original_auth_token_id = response.data["id"]
    original_auth_token = response.data["token"]
    original_auth_token_created_at = response.data["created_at"]
    assert original_auth_token_id is not None
    assert original_auth_token is not None
    assert original_auth_token_created_at is not None
    # we can fetch the token
    response = client.get(url)
    assert response.status_code == status.HTTP_403_FORBIDDEN
    response = client.get(url, HTTP_AUTHORIZATION=verification_token)
    assert response.status_code == status.HTTP_200_OK
    assert response.data["token_id"] == original_auth_token_id
    assert response.data["user_id"] == user.id
    assert response.data["organization_id"] == organization.id
    assert response.data["created_at"] == original_auth_token_created_at
    assert response.data["revoked_at"] is None
    # can only ever have one mobile app auth token.. old one gets deleted if we try
    # creating a new one
    response = client.post(url, HTTP_AUTHORIZATION=verification_token)
    assert response.status_code == status.HTTP_201_CREATED
    new_auth_token_id = response.data["id"]
    new_auth_token = response.data["token"]
    new_auth_token_created_at = response.data["created_at"]
    assert new_auth_token_id is not None
    assert new_auth_token is not None
    assert new_auth_token_created_at is not None
    # a fresh token record replaced the original
    assert new_auth_token_id != original_auth_token_id
    assert new_auth_token != original_auth_token
    assert new_auth_token_created_at != original_auth_token_created_at
    assert MobileAppAuthToken.objects.filter(user=user).count() == 1
    # we can delete the token
    response = client.delete(url)
    assert response.status_code == status.HTTP_403_FORBIDDEN
    response = client.delete(url, HTTP_AUTHORIZATION=verification_token)
    assert response.status_code == status.HTTP_204_NO_CONTENT
    assert MobileAppAuthToken.objects.filter(user=user).count() == 0
    # once deleted, further delete/get requests 404
    response = client.delete(url, HTTP_AUTHORIZATION=verification_token)
    assert response.status_code == status.HTTP_404_NOT_FOUND
    response = client.get(url, HTTP_AUTHORIZATION=verification_token)
    assert response.status_code == status.HTTP_404_NOT_FOUND

View file

@ -482,10 +482,10 @@ def post_slack_rate_limit_message(integration_id):
slack_channel = default_route.slack_channel_id_or_general_log_id
if slack_channel:
text = (
f"Delivering and updating incidents of integration {integration.verbal_name} in Slack is "
f"temporarily stopped due to rate limit. You could find new incidents at "
f"Delivering and updating alert groups of integration {integration.verbal_name} in Slack is "
f"temporarily stopped due to rate limit. You could find new alert groups at "
f"<{integration.new_incidents_web_link}|web page "
'"Incidents">'
'"Alert Groups">'
)
post_message_to_channel(integration.organization, slack_channel, text)

View file

@ -0,0 +1,18 @@
# Generated by Django 3.2.18 on 2023-03-08 04:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds Organization.cluster_slug (nullable); elsewhere in this change it is
    # compared against ONCALL_BACKEND_REGION to decide whether insight logs
    # can be forwarded.
    dependencies = [
        ('user_management', '0008_organization_is_grafana_incident_enabled'),
    ]
    operations = [
        migrations.AddField(
            model_name='organization',
            name='cluster_slug',
            field=models.CharField(default=None, max_length=300, null=True),
        ),
    ]

View file

@ -110,6 +110,7 @@ class Organization(MaintainableObject):
default=None,
null=True,
)
cluster_slug = models.CharField(max_length=300, null=True, default=None)
grafana_url = models.URLField()

View file

@ -41,6 +41,7 @@ def sync_organization(organization):
organization.save(
update_fields=[
"cluster_slug",
"stack_slug",
"org_slug",
"org_title",
@ -68,6 +69,7 @@ def _sync_instance_info(organization):
organization.org_title = instance_info["orgName"]
organization.region_slug = instance_info["regionSlug"]
organization.grafana_url = instance_info["url"]
organization.cluster_slug = instance_info["clusterSlug"]
organization.gcom_token_org_last_time_synced = timezone.now()

View file

@ -25,6 +25,7 @@ class ChatOpsType(enum.Enum):
SLACK = "SLACK"
MSTEAMS = "MSTEAMS"
TELEGRAM = "TELEGRAM"
MOBILE_APP = "MOBILE_APP"
def write_chatops_insight_log(author, event_name: ChatOpsEvent, chatops_type: ChatOpsType, **kwargs):

View file

@ -1,15 +1,20 @@
from django.apps import apps
import logging
from django.conf import settings
logger = logging.getLogger(__name__)
def is_insight_logs_enabled(organization):
    """
    is_insight_logs_enabled checks if insight logs are enabled for the given organization.

    Enabled when OnCall runs in the same cluster as the organization's Grafana
    instance (so logs can be forwarded), or when this is the open-source build.

    NOTE(review): this span rendered both the removed DynamicSetting-based
    implementation and its replacement; only the post-change implementation is
    kept here. Typo "inside logs" -> "insight logs" also fixed.
    """
    logger.info(
        "is_insight_logs_enabled: "
        f"IS_OPEN_SOURCE={settings.IS_OPEN_SOURCE} "
        f"ONCALL_BACKEND_REGION={settings.ONCALL_BACKEND_REGION} "
        f"cluster_slug={organization.cluster_slug}"
    )
    return settings.IS_OPEN_SOURCE or settings.ONCALL_BACKEND_REGION == organization.cluster_slug

View file

@ -55,6 +55,7 @@ from apps.base.tests.factories import (
)
from apps.email.tests.factories import EmailMessageFactory
from apps.heartbeat.tests.factories import IntegrationHeartBeatFactory
from apps.mobile_app.models import MobileAppVerificationToken
from apps.schedules.tests.factories import (
CustomOnCallShiftFactory,
OnCallScheduleCalendarFactory,
@ -175,6 +176,14 @@ def make_token_for_organization():
return _make_token_for_organization
@pytest.fixture
def make_mobile_app_verification_token_for_user():
    """Factory fixture: creates a MobileAppVerificationToken for a user/organization pair."""

    def _factory(user, organization):
        return MobileAppVerificationToken.create_auth_token(user, organization)

    return _factory
@pytest.fixture
def make_public_api_token():
def _make_public_api_token(user, organization, name="test_api_token"):
@ -643,6 +652,20 @@ def make_organization_and_user_with_plugin_token(make_organization_and_user, mak
return _make_organization_and_user_with_plugin_token
@pytest.fixture()
def make_organization_and_user_with_mobile_app_verification_token(
    make_organization_and_user, make_mobile_app_verification_token_for_user
):
    """Factory fixture: returns (organization, user, verification_token) for mobile-app auth tests."""

    def _factory(role: typing.Optional[LegacyAccessControlRole] = None):
        org, user = make_organization_and_user(role)
        # Only the token string is needed by callers; discard the model instance.
        _, verification_token = make_mobile_app_verification_token_for_user(user, org)
        return org, user, verification_token

    return _factory
@pytest.fixture()
def mock_send_user_notification_signal(monkeypatch):
def mocked_send_signal(*args, **kwargs):

View file

@ -583,6 +583,7 @@ SELF_HOSTED_SETTINGS = {
"ORG_TITLE": "Self-Hosted Organization",
"REGION_SLUG": "self_hosted_region",
"GRAFANA_API_URL": os.environ.get("GRAFANA_API_URL", default=None),
"CLUSTER_SLUG": "self_hosted_cluster",
}
GRAFANA_INCIDENT_STATIC_API_KEY = os.environ.get("GRAFANA_INCIDENT_STATIC_API_KEY", None)