From 6e35aadc0ccfbbf9387dad6b4298f1272f8d6210 Mon Sep 17 00:00:00 2001 From: Matias Bordese Date: Fri, 1 Dec 2023 14:45:18 -0300 Subject: [PATCH 01/14] Add test ensuring integration endpoints work if redis cache is down (#3445) --- engine/apps/integrations/tests/test_views.py | 104 +++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/engine/apps/integrations/tests/test_views.py b/engine/apps/integrations/tests/test_views.py index 80274a62..97fe1a59 100644 --- a/engine/apps/integrations/tests/test_views.py +++ b/engine/apps/integrations/tests/test_views.py @@ -22,6 +22,17 @@ class DatabaseBlocker(_DatabaseBlocker): raise OperationalError("Database access disabled") +def setup_failing_redis_cache(settings): + settings.DJANGO_REDIS_IGNORE_EXCEPTIONS = True + settings.RATELIMIT_FAIL_OPEN = True + settings.CACHES = { + "default": { + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": "redis://no-redis-here/", + } + } + + @pytest.mark.django_db def test_integration_json_data_too_big(settings, make_organization_and_user, make_alert_receive_channel): settings.DATA_UPLOAD_MAX_MEMORY_SIZE = 50 @@ -293,3 +304,96 @@ def test_integration_grafana_endpoint_without_db_has_alerts( call((alert_receive_channel.pk, data["alerts"][1])), ] ) + + +@patch("apps.integrations.views.create_alert") +@pytest.mark.parametrize( + "integration_type", + [ + arc_type + for arc_type in AlertReceiveChannel.INTEGRATION_TYPES + if arc_type not in ["amazon_sns", "grafana", "alertmanager", "grafana_alerting", "maintenance"] + ], +) +@pytest.mark.django_db +def test_integration_universal_endpoint_works_without_cache( + mock_create_alert, + make_organization_and_user, + make_alert_receive_channel, + integration_type, + settings, +): + # setup failing redis cache and ignore exception settings + setup_failing_redis_cache(settings) + + organization, user = make_organization_and_user() + alert_receive_channel = make_alert_receive_channel( + organization=organization, + author=user, + 
integration=integration_type, + ) + + client = APIClient() + url = reverse( + "integrations:universal", + kwargs={"integration_type": integration_type, "alert_channel_key": alert_receive_channel.token}, + ) + data = {"foo": "bar"} + response = client.post(url, data, format="json") + + assert response.status_code == status.HTTP_200_OK + + mock_create_alert.apply_async.assert_called_once_with( + [], + { + "title": None, + "message": None, + "image_url": None, + "link_to_upstream_details": None, + "alert_receive_channel_pk": alert_receive_channel.pk, + "integration_unique_data": None, + "raw_request_data": data, + }, + ) + + +@patch("apps.integrations.views.create_alertmanager_alerts") +@pytest.mark.django_db +def test_integration_grafana_endpoint_without_cache_has_alerts( + mock_create_alertmanager_alerts, settings, make_organization_and_user, make_alert_receive_channel +): + settings.DEBUG = False + # setup failing redis cache and ignore exception settings + setup_failing_redis_cache(settings) + + integration_type = "grafana" + organization, user = make_organization_and_user() + alert_receive_channel = make_alert_receive_channel( + organization=organization, + author=user, + integration=integration_type, + ) + + client = APIClient() + url = reverse("integrations:grafana", kwargs={"alert_channel_key": alert_receive_channel.token}) + + data = { + "alerts": [ + { + "foo": 123, + }, + { + "foo": 456, + }, + ] + } + response = client.post(url, data, format="json") + + assert response.status_code == status.HTTP_200_OK + + mock_create_alertmanager_alerts.apply_async.assert_has_calls( + [ + call((alert_receive_channel.pk, data["alerts"][0])), + call((alert_receive_channel.pk, data["alerts"][1])), + ] + ) From ea4f6926464e6d7b952e7275c3eb6625f81973d1 Mon Sep 17 00:00:00 2001 From: Vadim Stepanov Date: Mon, 4 Dec 2023 11:54:11 +0000 Subject: [PATCH 02/14] Pin markdown2 to 2.4.10 (#3494) # What this PR does Pins markdown to 2.4.10 to fix [failing 
test](https://github.com/grafana/oncall/actions/runs/7082829248/job/19276914906?pr=3490). ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- engine/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/engine/requirements.txt b/engine/requirements.txt index e7253048..c0b43a0e 100644 --- a/engine/requirements.txt +++ b/engine/requirements.txt @@ -54,3 +54,4 @@ lxml==4.9.2 babel==2.12.1 drf-spectacular==0.26.5 grpcio==1.57.0 +markdown2==2.4.10 From a3e3d8bc9d67464ac1270e6679cdb421562ab58e Mon Sep 17 00:00:00 2001 From: Innokentii Konstantinov Date: Mon, 4 Dec 2023 20:45:07 +0800 Subject: [PATCH 03/14] Change labels feature flag to work per oncall org (#3493) It's needed because anyway labels plugin provisioned per stack, not per org --------- Co-authored-by: Yulya Artyukhina --- engine/apps/labels/tests/test_labels.py | 8 ++++---- engine/apps/labels/utils.py | 5 +---- engine/conftest.py | 2 +- engine/settings/base.py | 4 ++-- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/engine/apps/labels/tests/test_labels.py b/engine/apps/labels/tests/test_labels.py index b4d116c2..541beca4 100644 --- a/engine/apps/labels/tests/test_labels.py +++ b/engine/apps/labels/tests/test_labels.py @@ -16,18 +16,18 @@ def test_labels_feature_flag(mock_is_labels_feature_enabled_for_org, make_organi organization = make_organization() # returns True if feature flag is enabled assert settings.FEATURE_LABELS_ENABLED_FOR_ALL - assert organization.id not in settings.FEATURE_LABELS_ENABLED_FOR_GRAFANA_ORGS + assert organization.id not in settings.FEATURE_LABELS_ENABLED_PER_ORG assert is_labels_feature_enabled(organization) - mock_is_labels_feature_enabled_for_org(organization.org_id) + mock_is_labels_feature_enabled_for_org(organization.id) # returns True if feature flag is 
disabled and organization is in the feature list assert not settings.FEATURE_LABELS_ENABLED_FOR_ALL - assert organization.org_id in settings.FEATURE_LABELS_ENABLED_FOR_GRAFANA_ORGS + assert organization.id in settings.FEATURE_LABELS_ENABLED_PER_ORG assert is_labels_feature_enabled(organization) mock_is_labels_feature_enabled_for_org(12345) # returns False if feature flag is disabled and organization is not in the feature list - assert organization.org_id not in settings.FEATURE_LABELS_ENABLED_FOR_GRAFANA_ORGS + assert organization.org_id not in settings.FEATURE_LABELS_ENABLED_PER_ORG assert not is_labels_feature_enabled(organization) diff --git a/engine/apps/labels/utils.py b/engine/apps/labels/utils.py index e0369b0b..d5aeee32 100644 --- a/engine/apps/labels/utils.py +++ b/engine/apps/labels/utils.py @@ -51,10 +51,7 @@ def get_associating_label_model(obj_model_name: str) -> typing.Type["AssociatedL def is_labels_feature_enabled(organization: "Organization") -> bool: - return ( - settings.FEATURE_LABELS_ENABLED_FOR_ALL - or organization.org_id in settings.FEATURE_LABELS_ENABLED_FOR_GRAFANA_ORGS # Grafana org ID, not OnCall org ID - ) + return settings.FEATURE_LABELS_ENABLED_FOR_ALL or organization.id in settings.FEATURE_LABELS_ENABLED_PER_ORG def get_labels_dict(labelable) -> dict[str, str]: diff --git a/engine/conftest.py b/engine/conftest.py index 6a69915d..80c8c91d 100644 --- a/engine/conftest.py +++ b/engine/conftest.py @@ -200,7 +200,7 @@ def clear_ical_users_cache(): def mock_is_labels_feature_enabled_for_org(settings): def _mock_is_labels_feature_enabled_for_org(org_id): settings.FEATURE_LABELS_ENABLED_FOR_ALL = False - settings.FEATURE_LABELS_ENABLED_FOR_GRAFANA_ORGS = [org_id] + settings.FEATURE_LABELS_ENABLED_PER_ORG = [org_id] return _mock_is_labels_feature_enabled_for_org diff --git a/engine/settings/base.py b/engine/settings/base.py index 2ce633d1..5b6bf177 100644 --- a/engine/settings/base.py +++ b/engine/settings/base.py @@ -69,8 +69,8 @@ 
GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED = getenv_boolean("GRAFANA_CLOUD_ONCALL_HE GRAFANA_CLOUD_NOTIFICATIONS_ENABLED = getenv_boolean("GRAFANA_CLOUD_NOTIFICATIONS_ENABLED", default=True) # Enable labels feature fo all organizations. This flag overrides FEATURE_LABELS_ENABLED_FOR_GRAFANA_ORGS FEATURE_LABELS_ENABLED_FOR_ALL = getenv_boolean("FEATURE_LABELS_ENABLED_FOR_ALL", default=False) -# Enable labels feature for organizations from the list. Use Grafana org_id, not OnCall id, for this flag -FEATURE_LABELS_ENABLED_FOR_GRAFANA_ORGS = getenv_list("FEATURE_LABELS_ENABLED_FOR_GRAFANA_ORGS", default=list()) +# Enable labels feature for organizations from the list. Use OnCall organization ID, for this flag +FEATURE_LABELS_ENABLED_PER_ORG = getenv_list("FEATURE_LABELS_ENABLED_PER_ORG", default=list()) TWILIO_API_KEY_SID = os.environ.get("TWILIO_API_KEY_SID") TWILIO_API_KEY_SECRET = os.environ.get("TWILIO_API_KEY_SECRET") From 9eb09c02721e40f2e57112b2b0aa5d061dbdf4f4 Mon Sep 17 00:00:00 2001 From: Matias Bordese Date: Mon, 4 Dec 2023 10:08:59 -0300 Subject: [PATCH 04/14] Refactor gcom api calls when syncing org (#3489) Make one API call instead of two. 
--- engine/apps/grafana_plugin/helpers/client.py | 6 +-- .../tests/test_gcom_api_client.py | 13 ++----- engine/apps/user_management/sync.py | 29 ++++++--------- .../apps/user_management/tests/test_sync.py | 37 +++++++++++++------ 4 files changed, 42 insertions(+), 43 deletions(-) diff --git a/engine/apps/grafana_plugin/helpers/client.py b/engine/apps/grafana_plugin/helpers/client.py index d9e7faaa..671386b1 100644 --- a/engine/apps/grafana_plugin/helpers/client.py +++ b/engine/apps/grafana_plugin/helpers/client.py @@ -371,11 +371,7 @@ class GcomAPIClient(APIClient): or feature_enabled_via_enable_key_comma_delimited ) - def is_rbac_enabled_for_stack(self, stack_id: str) -> bool: - """ - NOTE: must use an "Admin" GCOM token when calling this method - """ - instance_info = self.get_instance_info(stack_id, True) + def is_rbac_enabled_for_instance(self, instance_info: GCOMInstanceInfo = None) -> bool: if not instance_info: return False return self._feature_toggle_is_enabled(instance_info, "accessControlOnCall") diff --git a/engine/apps/grafana_plugin/tests/test_gcom_api_client.py b/engine/apps/grafana_plugin/tests/test_gcom_api_client.py index a8c3f67d..40d5b836 100644 --- a/engine/apps/grafana_plugin/tests/test_gcom_api_client.py +++ b/engine/apps/grafana_plugin/tests/test_gcom_api_client.py @@ -12,7 +12,7 @@ class TestIsRbacEnabledForStack: TEST_FEATURE_TOGGLE = "helloWorld" @pytest.mark.parametrize( - "gcom_api_response,expected", + "instance_info,expected", [ (None, False), ({}, False), @@ -22,16 +22,9 @@ class TestIsRbacEnabledForStack: ({"config": {"feature_toggles": {"accessControlOnCall": "true"}}}, True), ], ) - @patch("apps.grafana_plugin.helpers.client.GcomAPIClient.api_get") - def test_it_returns_based_on_feature_toggle_value( - self, mocked_gcom_api_client_api_get, gcom_api_response, expected - ): - stack_id = 5 - mocked_gcom_api_client_api_get.return_value = (gcom_api_response, {"status_code": 200}) - + def 
test_it_returns_based_on_feature_toggle_value(self, instance_info, expected): api_client = GcomAPIClient("someFakeApiToken") - assert api_client.is_rbac_enabled_for_stack(stack_id) == expected - assert mocked_gcom_api_client_api_get.called_once_with(f"instances/{stack_id}?config=true") + assert api_client.is_rbac_enabled_for_instance(instance_info) == expected @pytest.mark.parametrize( "instance_info_feature_toggles,delimiter,expected", diff --git a/engine/apps/user_management/sync.py b/engine/apps/user_management/sync.py index ebbcf6fd..e95245b6 100644 --- a/engine/apps/user_management/sync.py +++ b/engine/apps/user_management/sync.py @@ -13,24 +13,9 @@ logger.setLevel(logging.DEBUG) def sync_organization(organization: Organization) -> None: - grafana_api_client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token) - - # NOTE: checking whether or not RBAC is enabled depends on whether we are dealing with an open-source or cloud - # stack. For Cloud we should make a call to the GCOM API, using an admin API token, and get the list of - # feature_toggles enabled for the stack. For open-source, simply make a HEAD request to the grafana instance's API - # and consider RBAC enabled if the list RBAC permissions endpoint returns 200. We cannot simply rely on the HEAD - # call in cloud because if an instance is not active, the grafana gateway will still return 200 for the - # HEAD request. 
- if settings.LICENSE == settings.CLOUD_LICENSE_NAME: - gcom_client = GcomAPIClient(settings.GRAFANA_COM_ADMIN_API_TOKEN) - rbac_is_enabled = gcom_client.is_rbac_enabled_for_stack(organization.stack_id) - else: - rbac_is_enabled = grafana_api_client.is_rbac_enabled_for_organization() - - organization.is_rbac_permissions_enabled = rbac_is_enabled - _sync_instance_info(organization) + grafana_api_client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token) _, check_token_call_status = grafana_api_client.check_token() if check_token_call_status["connected"]: organization.api_token_status = Organization.API_TOKEN_STATUS_OK @@ -62,7 +47,7 @@ def sync_organization(organization: Organization) -> None: def _sync_instance_info(organization: Organization) -> None: if organization.gcom_token: gcom_client = GcomAPIClient(organization.gcom_token) - instance_info = gcom_client.get_instance_info(organization.stack_id) + instance_info = gcom_client.get_instance_info(organization.stack_id, include_config_query_param=True) if not instance_info or instance_info["orgId"] != organization.org_id: return @@ -73,7 +58,17 @@ def _sync_instance_info(organization: Organization) -> None: organization.region_slug = instance_info["regionSlug"] organization.grafana_url = instance_info["url"] organization.cluster_slug = instance_info["clusterSlug"] + organization.is_rbac_permissions_enabled = gcom_client.is_rbac_enabled_for_instance(instance_info) organization.gcom_token_org_last_time_synced = timezone.now() + else: + # NOTE: checking whether or not RBAC is enabled depends on whether we are dealing with an open-source or cloud + # stack. For Cloud we should make a call to the GCOM API, using an admin API token, and get the list of + # feature_toggles enabled for the stack. For open-source, simply make a HEAD request to the grafana instance's API + # and consider RBAC enabled if the list RBAC permissions endpoint returns 200. 
We cannot simply rely on the HEAD + # call in cloud because if an instance is not active, the grafana gateway will still return 200 for the + # HEAD request. + grafana_api_client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token) + organization.is_rbac_permissions_enabled = grafana_api_client.is_rbac_enabled_for_organization() def sync_users_and_teams(client: GrafanaAPIClient, organization: Organization) -> None: diff --git a/engine/apps/user_management/tests/test_sync.py b/engine/apps/user_management/tests/test_sync.py index 86d4881b..76537cef 100644 --- a/engine/apps/user_management/tests/test_sync.py +++ b/engine/apps/user_management/tests/test_sync.py @@ -312,18 +312,36 @@ def test_sync_organization_is_rbac_permissions_enabled_open_source(make_organiza assert organization.is_rbac_permissions_enabled == grafana_api_response -@pytest.mark.parametrize("gcom_api_response", [False, True]) -@patch("apps.user_management.sync.GcomAPIClient") -@override_settings(LICENSE=settings.CLOUD_LICENSE_NAME) -@override_settings(GRAFANA_COM_ADMIN_API_TOKEN="mockedToken") +@patch("apps.user_management.sync.GcomAPIClient.api_get") +@pytest.mark.parametrize( + "instance_info,expected", + [ + ({"config": {"feature_toggles": {}}}, False), + ({"config": {"feature_toggles": {"accessControlOnCall": "false"}}}, False), + ({"config": {"feature_toggles": {"accessControlOnCall": "true"}}}, True), + ], +) @pytest.mark.django_db -def test_sync_organization_is_rbac_permissions_enabled_cloud(mocked_gcom_client, make_organization, gcom_api_response): +def test_sync_organization_is_rbac_permissions_enabled_cloud( + mocked_gcom_client, make_organization, instance_info, expected +): stack_id = 5 - organization = make_organization(stack_id=stack_id) + organization = make_organization(stack_id=stack_id, gcom_token="TEST_GCOM_TOKEN") api_check_token_call_status = {"connected": True} - mocked_gcom_client.return_value.is_rbac_enabled_for_stack.return_value = 
gcom_api_response + instance_info.update( + { + "orgId": organization.org_id, + "slug": organization.stack_slug, + "orgSlug": organization.org_slug, + "orgName": organization.org_title, + "regionSlug": organization.region_slug, + "url": organization.grafana_url, + "clusterSlug": organization.cluster_slug, + } + ) + mocked_gcom_client.return_value = (instance_info, {"status_code": 200}) api_users_response = ( { @@ -367,10 +385,7 @@ def test_sync_organization_is_rbac_permissions_enabled_cloud(mocked_gcom_client, sync_organization(organization) organization.refresh_from_db() - - assert mocked_gcom_client.return_value.called_once_with("mockedToken") - assert mocked_gcom_client.return_value.is_rbac_enabled_for_stack.called_once_with(stack_id) - assert organization.is_rbac_permissions_enabled == gcom_api_response + assert organization.is_rbac_permissions_enabled == expected @pytest.mark.django_db From 4ccfda58e5f222a0a283eb954212b38ff747905c Mon Sep 17 00:00:00 2001 From: Vadim Stepanov Date: Mon, 4 Dec 2023 13:13:53 +0000 Subject: [PATCH 05/14] Disallow creating and deleting direct paging integrations (#3475) # What this PR does Disallows creating and deleting direct paging integrations via both internal and public APIs. It also hides the direct paging option in the UI when creating a new integration. 
## Which issue(s) this PR fixes Related to https://github.com/grafana/oncall-private/issues/2302 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --------- Co-authored-by: Dominik --- CHANGELOG.md | 4 ++ .../api/serializers/alert_receive_channel.py | 7 ++- .../api/tests/test_alert_receive_channel.py | 55 ++++++++--------- .../apps/api/views/alert_receive_channel.py | 8 +++ .../public_api/serializers/integrations.py | 2 + .../public_api/tests/test_integrations.py | 37 +++++------ engine/apps/public_api/views/integrations.py | 9 +++ .../IntegrationForm/IntegrationForm.tsx | 5 ++ .../src/pages/integration/Integration.tsx | 61 ++++++++++--------- 9 files changed, 103 insertions(+), 85 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50c75e54..b768aaa7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +### Changed + +- Disallow creating and deleting direct paging integrations by @vadimkerr ([#3475](https://github.com/grafana/oncall/pull/3475)) + ## v1.3.70 (2023-12-01) Maintenance release diff --git a/engine/apps/api/serializers/alert_receive_channel.py b/engine/apps/api/serializers/alert_receive_channel.py index 0aae34b6..3e1ecc30 100644 --- a/engine/apps/api/serializers/alert_receive_channel.py +++ b/engine/apps/api/serializers/alert_receive_channel.py @@ -349,6 +349,10 @@ class AlertReceiveChannelSerializer( def validate_integration(integration): if integration is None or integration not in AlertReceiveChannel.WEB_INTEGRATION_CHOICES: raise BadRequest(detail="invalid integration") + + if integration == AlertReceiveChannel.INTEGRATION_DIRECT_PAGING: + raise BadRequest(detail="Direct paging integrations can't be created") + return integration def 
validate_verbal_name(self, verbal_name): @@ -372,7 +376,8 @@ class AlertReceiveChannelSerializer( return IntegrationHeartBeatSerializer(heartbeat).data def get_allow_delete(self, obj: "AlertReceiveChannel"): - return True + # don't allow deleting direct paging integrations + return obj.integration != AlertReceiveChannel.INTEGRATION_DIRECT_PAGING def get_alert_count(self, obj: "AlertReceiveChannel"): return 0 diff --git a/engine/apps/api/tests/test_alert_receive_channel.py b/engine/apps/api/tests/test_alert_receive_channel.py index 8dac1b4b..3f8487c8 100644 --- a/engine/apps/api/tests/test_alert_receive_channel.py +++ b/engine/apps/api/tests/test_alert_receive_channel.py @@ -785,45 +785,17 @@ def test_get_alert_receive_channels_direct_paging_present_for_filters( @pytest.mark.django_db -def test_create_alert_receive_channels_direct_paging( +def test_cant_create_alert_receive_channels_direct_paging( make_organization_and_user_with_plugin_token, make_team, make_alert_receive_channel, make_user_auth_headers ): organization, user, token = make_organization_and_user_with_plugin_token() - team = make_team(organization) client = APIClient() url = reverse("api-internal:alert_receive_channel-list") - - response_1 = client.post( + response = client.post( url, data={"integration": "direct_paging"}, format="json", **make_user_auth_headers(user, token) ) - response_2 = client.post( - url, data={"integration": "direct_paging"}, format="json", **make_user_auth_headers(user, token) - ) - - response_3 = client.post( - url, - data={"integration": "direct_paging", "team": team.public_primary_key}, - format="json", - **make_user_auth_headers(user, token), - ) - response_4 = client.post( - url, - data={"integration": "direct_paging", "team": team.public_primary_key}, - format="json", - **make_user_auth_headers(user, token), - ) - - # Check direct paging integration for "No team" is created - assert response_1.status_code == status.HTTP_201_CREATED - # Check direct paging integration is 
not created, as it already exists for "No team" - assert response_2.status_code == status.HTTP_400_BAD_REQUEST - - # Check direct paging integration for team is created - assert response_3.status_code == status.HTTP_201_CREATED - # Check direct paging integration is not created, as it already exists for team - assert response_4.status_code == status.HTTP_400_BAD_REQUEST - assert response_4.json()["detail"] == AlertReceiveChannel.DuplicateDirectPagingError.DETAIL + assert response.status_code == status.HTTP_400_BAD_REQUEST @pytest.mark.django_db @@ -852,6 +824,27 @@ def test_update_alert_receive_channels_direct_paging( assert response.json()["detail"] == AlertReceiveChannel.DuplicateDirectPagingError.DETAIL +@pytest.mark.django_db +def test_cant_delete_direct_paging_integration( + make_organization_and_user_with_plugin_token, make_alert_receive_channel, make_user_auth_headers +): + organization, user, token = make_organization_and_user_with_plugin_token() + integration = make_alert_receive_channel(organization, integration=AlertReceiveChannel.INTEGRATION_DIRECT_PAGING) + + # check allow_delete is False (so the frontend can hide the delete button) + client = APIClient() + url = reverse("api-internal:alert_receive_channel-detail", kwargs={"pk": integration.public_primary_key}) + response = client.get(url, **make_user_auth_headers(user, token)) + assert response.status_code == status.HTTP_200_OK + assert response.json()["allow_delete"] is False + + # check delete is not allowed + client = APIClient() + url = reverse("api-internal:alert_receive_channel-detail", kwargs={"pk": integration.public_primary_key}) + response = client.delete(url, **make_user_auth_headers(user, token)) + assert response.status_code == status.HTTP_400_BAD_REQUEST + + @pytest.mark.django_db def test_start_maintenance_integration( make_user_auth_headers, diff --git a/engine/apps/api/views/alert_receive_channel.py b/engine/apps/api/views/alert_receive_channel.py index 9edab2dc..b0309a7a 100644 --- 
a/engine/apps/api/views/alert_receive_channel.py +++ b/engine/apps/api/views/alert_receive_channel.py @@ -134,6 +134,14 @@ class AlertReceiveChannelView( new_state=new_state, ) + def destroy(self, request, *args, **kwargs): + # don't allow deleting direct paging integrations + instance = self.get_object() + if instance.integration == AlertReceiveChannel.INTEGRATION_DIRECT_PAGING: + raise BadRequest(detail="Direct paging integrations can't be deleted") + + return super().destroy(request, *args, **kwargs) + def perform_destroy(self, instance): write_resource_insight_log( instance=instance, diff --git a/engine/apps/public_api/serializers/integrations.py b/engine/apps/public_api/serializers/integrations.py index 3dafad78..af612011 100644 --- a/engine/apps/public_api/serializers/integrations.py +++ b/engine/apps/public_api/serializers/integrations.py @@ -67,6 +67,8 @@ class IntegrationTypeField(fields.CharField): raise BadRequest(detail="Invalid integration type") if has_legacy_prefix(data): raise BadRequest("This integration type is deprecated") + if data == AlertReceiveChannel.INTEGRATION_DIRECT_PAGING: + raise BadRequest(detail="Direct paging integrations can't be created") return data diff --git a/engine/apps/public_api/tests/test_integrations.py b/engine/apps/public_api/tests/test_integrations.py index 8e5dd150..0d7a3045 100644 --- a/engine/apps/public_api/tests/test_integrations.py +++ b/engine/apps/public_api/tests/test_integrations.py @@ -820,35 +820,15 @@ def test_update_integration_default_route( @pytest.mark.django_db -def test_create_integrations_direct_paging( +def test_cant_create_integrations_direct_paging( make_organization_and_user_with_token, make_team, make_alert_receive_channel, make_user_auth_headers ): organization, _, token = make_organization_and_user_with_token() - team = make_team(organization) client = APIClient() url = reverse("api-public:integrations-list") - - response_1 = client.post(url, data={"type": "direct_paging"}, format="json", 
HTTP_AUTHORIZATION=token) - response_2 = client.post(url, data={"type": "direct_paging"}, format="json", HTTP_AUTHORIZATION=token) - - response_3 = client.post( - url, data={"type": "direct_paging", "team_id": team.public_primary_key}, format="json", HTTP_AUTHORIZATION=token - ) - response_4 = client.post( - url, data={"type": "direct_paging", "team_id": team.public_primary_key}, format="json", HTTP_AUTHORIZATION=token - ) - - # Check direct paging integration for "No team" is created - assert response_1.status_code == status.HTTP_201_CREATED - # Check direct paging integration is not created, as it already exists for "No team" - assert response_2.status_code == status.HTTP_400_BAD_REQUEST - - # Check direct paging integration for team is created - assert response_3.status_code == status.HTTP_201_CREATED - # Check direct paging integration is not created, as it already exists for team - assert response_4.status_code == status.HTTP_400_BAD_REQUEST - assert response_4.data["detail"] == AlertReceiveChannel.DuplicateDirectPagingError.DETAIL + response = client.post(url, data={"type": "direct_paging"}, format="json", HTTP_AUTHORIZATION=token) + assert response.status_code == status.HTTP_400_BAD_REQUEST @pytest.mark.django_db @@ -873,6 +853,17 @@ def test_update_integrations_direct_paging( assert response.data["detail"] == AlertReceiveChannel.DuplicateDirectPagingError.DETAIL +@pytest.mark.django_db +def test_cant_delete_direct_paging_integration(make_organization_and_user_with_token, make_alert_receive_channel): + organization, user, token = make_organization_and_user_with_token() + integration = make_alert_receive_channel(organization, integration=AlertReceiveChannel.INTEGRATION_DIRECT_PAGING) + + client = APIClient() + url = reverse("api-public:integrations-detail", args=[integration.public_primary_key]) + response = client.delete(url, HTTP_AUTHORIZATION=token) + assert response.status_code == status.HTTP_400_BAD_REQUEST + + @pytest.mark.django_db def 
test_get_integration_type_legacy( make_organization_and_user_with_token, make_alert_receive_channel, make_channel_filter, make_integration_heartbeat diff --git a/engine/apps/public_api/views/integrations.py b/engine/apps/public_api/views/integrations.py index 5bcb92fb..358f5420 100644 --- a/engine/apps/public_api/views/integrations.py +++ b/engine/apps/public_api/views/integrations.py @@ -8,6 +8,7 @@ from apps.alerts.models import AlertReceiveChannel from apps.auth_token.auth import ApiTokenAuthentication from apps.public_api.serializers import IntegrationSerializer, IntegrationUpdateSerializer from apps.public_api.throttlers.user_throttle import UserThrottle +from common.api_helpers.exceptions import BadRequest from common.api_helpers.filters import ByTeamFilter from common.api_helpers.mixins import FilterSerializerMixin, RateLimitHeadersMixin, UpdateSerializerMixin from common.api_helpers.paginators import FiftyPageSizePaginator @@ -70,6 +71,14 @@ class IntegrationView( new_state=new_state, ) + def destroy(self, request, *args, **kwargs): + # don't allow deleting direct paging integrations + instance = self.get_object() + if instance.integration == AlertReceiveChannel.INTEGRATION_DIRECT_PAGING: + raise BadRequest(detail="Direct paging integrations can't be deleted") + + return super().destroy(request, *args, **kwargs) + def perform_destroy(self, instance): write_resource_insight_log(instance=instance, author=self.request.user, event=EntityEvent.DELETED) instance.delete() diff --git a/grafana-plugin/src/containers/IntegrationForm/IntegrationForm.tsx b/grafana-plugin/src/containers/IntegrationForm/IntegrationForm.tsx index d2847e57..03d24f91 100644 --- a/grafana-plugin/src/containers/IntegrationForm/IntegrationForm.tsx +++ b/grafana-plugin/src/containers/IntegrationForm/IntegrationForm.tsx @@ -88,6 +88,11 @@ const IntegrationForm = observer((props: IntegrationFormProps) => { return false; } + // don't allow creating direct paging integrations + if (option.value === 
'direct_paging') { + return false; + } + return ( option.display_name.toLowerCase().includes(filterValue.toLowerCase()) && !option.value.toLowerCase().startsWith('legacy_') diff --git a/grafana-plugin/src/pages/integration/Integration.tsx b/grafana-plugin/src/pages/integration/Integration.tsx index 17c7848c..ab32cbb4 100644 --- a/grafana-plugin/src/pages/integration/Integration.tsx +++ b/grafana-plugin/src/pages/integration/Integration.tsx @@ -34,6 +34,7 @@ import IntegrationBlock from 'components/Integrations/IntegrationBlock'; import PageErrorHandlingWrapper, { PageBaseState } from 'components/PageErrorHandlingWrapper/PageErrorHandlingWrapper'; import { initErrorDataState } from 'components/PageErrorHandlingWrapper/PageErrorHandlingWrapper.helpers'; import PluginLink from 'components/PluginLink/PluginLink'; +import RenderConditionally from 'components/RenderConditionally/RenderConditionally'; import Tag from 'components/Tag/Tag'; import Text from 'components/Text/Text'; import TooltipBadge from 'components/TooltipBadge/TooltipBadge'; @@ -958,37 +959,37 @@ const IntegrationActions: React.FC = ({ - -
- - -
-
{ - setConfirmModal({ - isOpen: true, - title: 'Delete Integration?', - body: ( - - Are you sure you want to delete ? - - ), - onConfirm: deleteIntegration, - dismissText: 'Cancel', - confirmText: 'Delete', - }); - }} - className="u-width-100" - > - - - - Delete Integration - - + +
+ +
+
{ + setConfirmModal({ + isOpen: true, + title: 'Delete Integration?', + body: ( + + Are you sure you want to delete ? + + ), + onConfirm: deleteIntegration, + dismissText: 'Cancel', + confirmText: 'Delete', + }); + }} + className="u-width-100" + > + + + + Delete Integration + + +
-
- + +
)} > From 9796489b8ec0738a663e2b81a37645c726c6f6a3 Mon Sep 17 00:00:00 2001 From: Vadim Stepanov Date: Mon, 4 Dec 2023 13:46:03 +0000 Subject: [PATCH 06/14] Skip empty alert group labels (#3495) # What this PR does Makes sure there are no empty alert group label values. ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- engine/apps/labels/alert_group_labels.py | 12 ++++++++++++ engine/apps/labels/tests/test_alert_group.py | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/engine/apps/labels/alert_group_labels.py b/engine/apps/labels/alert_group_labels.py index 68ef6ead..450ff40d 100644 --- a/engine/apps/labels/alert_group_labels.py +++ b/engine/apps/labels/alert_group_labels.py @@ -103,6 +103,10 @@ def _custom_labels(alert_receive_channel: "AlertReceiveChannel", raw_request_dat value = rendered_labels[key] # check value length + if len(value) == 0: + logger.warning("Template result value is empty. %s", value) + continue + if len(value) > MAX_VALUE_NAME_LENGTH: logger.warning("Template result value is too long. %s", value) continue @@ -147,11 +151,19 @@ def _template_labels(alert_receive_channel: "AlertReceiveChannel", raw_request_d value = str(value) # check key length + if len(key) == 0: + logger.warning("Template result key is empty. %s", key) + continue + if len(key) > MAX_KEY_NAME_LENGTH: logger.warning("Template result key is too long. %s", key) continue # check value length + if len(value) == 0: + logger.warning("Template result value is empty. %s", value) + continue + if len(value) > MAX_VALUE_NAME_LENGTH: logger.warning("Template result value is too long. 
%s", value) continue diff --git a/engine/apps/labels/tests/test_alert_group.py b/engine/apps/labels/tests/test_alert_group.py index 935e15af..dd882800 100644 --- a/engine/apps/labels/tests/test_alert_group.py +++ b/engine/apps/labels/tests/test_alert_group.py @@ -46,6 +46,7 @@ def test_assign_labels( label_key_1 = make_label_key(organization=organization, key_name="c") label_key_2 = make_label_key(organization=organization) label_key_3 = make_label_key(organization=organization) + label_key_4 = make_label_key(organization=organization) # create alert receive channel with all 3 types of labels alert_receive_channel = make_alert_receive_channel( @@ -56,6 +57,7 @@ def test_assign_labels( [label_key_2.id, "nonexistent", None], # plain label with nonexistent value ID [label_key_1.id, None, "{{ payload.c }}"], # templated label [label_key_3.id, None, TOO_LONG_VALUE_NAME], # templated label too long + [label_key_4.id, None, "{{ payload.nonexistent }}"], # templated label with nonexistent key ], alert_group_labels_template="{{ payload.advanced_template | tojson }}", ) @@ -94,8 +96,6 @@ def test_assign_labels( def test_assign_labels_custom_labels_none( make_organization, make_alert_receive_channel, - make_label_key_and_value, - make_label_key, make_integration_label_association, ): organization = make_organization() From 1df1b1eaa0941d9fb84bc55f044b647f353b7402 Mon Sep 17 00:00:00 2001 From: Joey Orlando Date: Mon, 4 Dec 2023 13:08:57 -0500 Subject: [PATCH 07/14] patch redis cluster multi-key operations (#3496) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Which issue(s) this PR fixes Related to https://github.com/grafana/oncall-private/issues/2363 Addresses this issue that arises when using `cache.get_many`/`cache.set_many` operations with a Redis Cluster: ```python3 File "/usr/local/lib/python3.11/site-packages/redis/cluster.py", line 1006, in determine_slot raise RedisClusterException( redis.exceptions.RedisClusterException: 
MGET - all keys must map to the same key slot ``` From the Redis Cluster [docs](https://redis.io/docs/reference/cluster-spec/#hash-tags), this can be addressed with this 👇 . Basically this will ensure that keys in multi-key operations will resolve to the same hash slot (read: node): > Hash tags > There is an exception for the computation of the hash slot that is used in order to implement hash tags. Hash tags are a way to ensure that multiple keys are allocated in the same hash slot. This is used in order to implement multi-key operations in Redis Cluster. > > To implement hash tags, the hash slot for a key is computed in a slightly different way in certain conditions. If the key contains a "{...}" pattern only the substring between { and } is hashed in order to obtain the hash slot. However since it is possible that there are multiple occurrences of { or } the algorithm is well specified by the following rules: > > IF the key contains a { character. > AND IF there is a } character to the right of {. > AND IF there are one or more characters between the first occurrence of { and the first occurrence of }. > Then instead of hashing the key, only what is between the first occurrence of { and the following first occurrence of } is hashed. 
## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [ ] Documentation added (or `pr:no public docs` PR label added if not required) - [ ] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- engine/apps/metrics_exporter/helpers.py | 22 ++++---- .../tasks/going_oncall_notification.py | 6 ++- engine/apps/schedules/ical_utils.py | 16 ++++-- .../apps/schedules/tests/test_ical_utils.py | 2 +- engine/common/cache.py | 41 +++++++++++++++ engine/common/tests/test_cache.py | 51 +++++++++++++++++++ engine/settings/base.py | 1 + 7 files changed, 125 insertions(+), 14 deletions(-) create mode 100644 engine/common/cache.py create mode 100644 engine/common/tests/test_cache.py diff --git a/engine/apps/metrics_exporter/helpers.py b/engine/apps/metrics_exporter/helpers.py index ec979f5b..b8b48ede 100644 --- a/engine/apps/metrics_exporter/helpers.py +++ b/engine/apps/metrics_exporter/helpers.py @@ -22,6 +22,7 @@ from apps.metrics_exporter.constants import ( RecalculateMetricsTimer, UserWasNotifiedOfAlertGroupsMetricsDict, ) +from common.cache import ensure_cache_key_allocates_to_the_same_hash_slot if typing.TYPE_CHECKING: from apps.alerts.models import AlertReceiveChannel @@ -98,24 +99,27 @@ def get_metrics_cache_timeout(organization_id): def get_metrics_cache_timer_key(organization_id) -> str: - return f"{METRICS_CACHE_TIMER}_{organization_id}" - - -def get_metrics_cache_timer_for_organization(organization_id): - key = get_metrics_cache_timer_key(organization_id) - return cache.get(key) + return ensure_cache_key_allocates_to_the_same_hash_slot( + f"{METRICS_CACHE_TIMER}_{organization_id}", METRICS_CACHE_TIMER + ) def get_metric_alert_groups_total_key(organization_id) -> str: - return f"{ALERT_GROUPS_TOTAL}_{organization_id}" + return ensure_cache_key_allocates_to_the_same_hash_slot( + f"{ALERT_GROUPS_TOTAL}_{organization_id}", ALERT_GROUPS_TOTAL + ) def get_metric_alert_groups_response_time_key(organization_id) -> str: - return 
f"{ALERT_GROUPS_RESPONSE_TIME}_{organization_id}" + return ensure_cache_key_allocates_to_the_same_hash_slot( + f"{ALERT_GROUPS_RESPONSE_TIME}_{organization_id}", ALERT_GROUPS_RESPONSE_TIME + ) def get_metric_user_was_notified_of_alert_groups_key(organization_id) -> str: - return f"{USER_WAS_NOTIFIED_OF_ALERT_GROUPS}_{organization_id}" + return ensure_cache_key_allocates_to_the_same_hash_slot( + f"{USER_WAS_NOTIFIED_OF_ALERT_GROUPS}_{organization_id}", USER_WAS_NOTIFIED_OF_ALERT_GROUPS + ) def get_metric_calculation_started_key(metric_name) -> str: diff --git a/engine/apps/mobile_app/tasks/going_oncall_notification.py b/engine/apps/mobile_app/tasks/going_oncall_notification.py index 05406e10..9d58a707 100644 --- a/engine/apps/mobile_app/tasks/going_oncall_notification.py +++ b/engine/apps/mobile_app/tasks/going_oncall_notification.py @@ -15,6 +15,7 @@ from apps.mobile_app.types import FCMMessageData, MessageType, Platform from apps.mobile_app.utils import MAX_RETRIES, construct_fcm_message, send_push_notification from apps.schedules.models.on_call_schedule import OnCallSchedule, ScheduleEvent from apps.user_management.models import User +from common.cache import ensure_cache_key_allocates_to_the_same_hash_slot from common.custom_celery_tasks import shared_dedicated_queue_retry_task from common.l10n import format_localized_datetime, format_localized_time @@ -164,7 +165,10 @@ def _should_we_send_push_notification( def _generate_cache_key(user_pk: str, schedule_event: ScheduleEvent) -> str: - return f"going_oncall_push_notification:{user_pk}:{schedule_event['shift']['pk']}" + KEY_PREFIX = "going_oncall_push_notification" + return ensure_cache_key_allocates_to_the_same_hash_slot( + f"{KEY_PREFIX}:{user_pk}:{schedule_event['shift']['pk']}", KEY_PREFIX + ) @shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=MAX_RETRIES) diff --git a/engine/apps/schedules/ical_utils.py b/engine/apps/schedules/ical_utils.py index e39874fd..9dede8fc 
100644 --- a/engine/apps/schedules/ical_utils.py +++ b/engine/apps/schedules/ical_utils.py @@ -35,6 +35,7 @@ from apps.schedules.constants import ( RE_PRIORITY, ) from apps.schedules.ical_events import ical_events +from common.cache import ensure_cache_key_allocates_to_the_same_hash_slot from common.timezones import is_valid_timezone from common.utils import timed_lru_cache @@ -403,15 +404,24 @@ def get_cached_oncall_users_for_multiple_schedules(schedules: typing.List["OnCal from apps.schedules.models import OnCallSchedule from apps.user_management.models import User + CACHE_KEY_PREFIX = "schedule_oncall_users_" + def _generate_cache_key_for_schedule_oncall_users(schedule: "OnCallSchedule") -> str: - return f"schedule_{schedule.public_primary_key}_oncall_users" + return ensure_cache_key_allocates_to_the_same_hash_slot( + f"{CACHE_KEY_PREFIX}{schedule.public_primary_key}", CACHE_KEY_PREFIX + ) def _get_schedule_public_primary_key_from_schedule_oncall_users_cache_key(cache_key: str) -> str: - return cache_key.replace("schedule_", "").replace("_oncall_users", "") + """ + remove any brackets that might be included in the cache key (when redis cluster is active). 
+ See `_generate_cache_key_for_schedule_oncall_users` just above + """ + cache_key = cache_key.replace("{", "").replace("}", "") + return cache_key.replace(CACHE_KEY_PREFIX, "") CACHE_TTL = 15 * 60 # 15 minutes in seconds - cache_keys: typing.List[str] = [_generate_cache_key_for_schedule_oncall_users(schedule) for schedule in schedules] + cache_keys = [_generate_cache_key_for_schedule_oncall_users(schedule) for schedule in schedules] # get_many returns a dictionary with all the keys we asked for that actually exist # in the cache (and haven’t expired) diff --git a/engine/apps/schedules/tests/test_ical_utils.py b/engine/apps/schedules/tests/test_ical_utils.py index 97e98b40..3b6cf517 100644 --- a/engine/apps/schedules/tests/test_ical_utils.py +++ b/engine/apps/schedules/tests/test_ical_utils.py @@ -584,7 +584,7 @@ def test_get_cached_oncall_users_for_multiple_schedules( return users, (schedule1, schedule2, schedule3) def _generate_cache_key(schedule): - return f"schedule_{schedule.public_primary_key}_oncall_users" + return f"schedule_oncall_users_{schedule.public_primary_key}" # scenario: nothing is cached, need to recalculate everything and cache it users, schedules = _test_setup() diff --git a/engine/common/cache.py b/engine/common/cache.py new file mode 100644 index 00000000..94047a66 --- /dev/null +++ b/engine/common/cache.py @@ -0,0 +1,41 @@ +import typing + +from django.conf import settings + +_RT = typing.TypeVar("_RT", str, typing.List[str], typing.Dict[str, typing.Any]) + + +def ensure_cache_key_allocates_to_the_same_hash_slot(cache_keys: _RT, pattern_to_wrap_in_brackets: str) -> _RT: + """ + This method will ensure that when using Redis Cluster, multiple cache keys will be allocated to the same hash slot. 
+ This ensures that multi-key operations (ex `cache.get_many` and `cache.set_many`) will work without raising this + exception: + + ``` + File "/usr/local/lib/python3.11/site-packages/redis/cluster.py", line 1006, in determine_slot + raise RedisClusterException( + redis.exceptions.RedisClusterException: MGET - all keys must map to the same key slot + ``` + + From the Redis Cluster [docs](https://redis.io/docs/reference/cluster-spec/#hash-tags): + + There is an exception for the computation of the hash slot that is used in order to implement hash tags. + Hash tags are a way to ensure that multiple keys are allocated in the same hash slot. + This is used in order to implement multi-key operations in Redis Cluster. + + To implement hash tags, the hash slot for a key is computed in a slightly different way in certain conditions. + If the key contains a "{...}" pattern only the substring between { and } is hashed in order to obtain the hash slot. + However since it is possible that there are multiple occurrences of { or } the algorithm is well specified by the + following rules: + """ + if not settings.USE_REDIS_CLUSTER: + return cache_keys + + def _replace_key(key: str) -> str: + return key.replace(pattern_to_wrap_in_brackets, f"{{{pattern_to_wrap_in_brackets}}}") + + if isinstance(cache_keys, str): + return _replace_key(cache_keys) + elif isinstance(cache_keys, dict): + return {_replace_key(key): value for key, value in cache_keys.items()} + return [_replace_key(key) for key in cache_keys] diff --git a/engine/common/tests/test_cache.py b/engine/common/tests/test_cache.py new file mode 100644 index 00000000..6a4ab193 --- /dev/null +++ b/engine/common/tests/test_cache.py @@ -0,0 +1,51 @@ +from django.test import override_settings + +from common.cache import ensure_cache_key_allocates_to_the_same_hash_slot + +PATTERN = "schedule_oncall_users" +NON_EXISTENT_PATTERN = "nmzxcnvmzxcv" +NUM_CACHE_KEYS = 5 +SINGLE_CACHE_KEY = f"{PATTERN}_0" +CACHE_KEYS = [f"{PATTERN}_{pk}" for 
pk in range(NUM_CACHE_KEYS)] +SET_MANY_CACHE_KEYS_DICT = {k: "foo" for k in CACHE_KEYS} + + +def test_ensure_cache_key_allocates_to_the_same_hash_slot() -> None: + def _convert_key(key: str) -> str: + return key.replace(PATTERN, f"{{{PATTERN}}}") + + # when USE_REDIS_CLUSTER is False the method should just return the cache keys + with override_settings(USE_REDIS_CLUSTER=False): + assert ensure_cache_key_allocates_to_the_same_hash_slot(SINGLE_CACHE_KEY, PATTERN) == SINGLE_CACHE_KEY + assert ensure_cache_key_allocates_to_the_same_hash_slot(CACHE_KEYS, PATTERN) == CACHE_KEYS + assert ( + ensure_cache_key_allocates_to_the_same_hash_slot(SET_MANY_CACHE_KEYS_DICT, PATTERN) + == SET_MANY_CACHE_KEYS_DICT + ) + + # when USE_REDIS_CLUSTER is True the method should wrap the specified pattern within the cache keys in curly brackets + with override_settings(USE_REDIS_CLUSTER=True): + # works with a single str cache key + assert ensure_cache_key_allocates_to_the_same_hash_slot(SINGLE_CACHE_KEY, PATTERN) == _convert_key( + SINGLE_CACHE_KEY + ) + + # works with a list (useful for cache.get_many operations) + assert ensure_cache_key_allocates_to_the_same_hash_slot(CACHE_KEYS, PATTERN) == [ + _convert_key(k) for k in CACHE_KEYS + ] + + # works with a dict (useful for cache.set_many operations) + assert ensure_cache_key_allocates_to_the_same_hash_slot(SET_MANY_CACHE_KEYS_DICT, PATTERN) == { + _convert_key(k): v for k, v in SET_MANY_CACHE_KEYS_DICT.items() + } + + # if the pattern doesn't exist, we don't wrap it in brackets + assert ( + ensure_cache_key_allocates_to_the_same_hash_slot(SINGLE_CACHE_KEY, NON_EXISTENT_PATTERN) == SINGLE_CACHE_KEY + ) + assert ensure_cache_key_allocates_to_the_same_hash_slot(CACHE_KEYS, NON_EXISTENT_PATTERN) == CACHE_KEYS + assert ( + ensure_cache_key_allocates_to_the_same_hash_slot(SET_MANY_CACHE_KEYS_DICT, NON_EXISTENT_PATTERN) + == SET_MANY_CACHE_KEYS_DICT + ) diff --git a/engine/settings/base.py b/engine/settings/base.py index 5b6bf177..9dbc684e 
100644 --- a/engine/settings/base.py +++ b/engine/settings/base.py @@ -194,6 +194,7 @@ REDIS_URI = os.getenv("REDIS_URI") if not REDIS_URI: REDIS_URI = f"{REDIS_PROTOCOL}://{REDIS_USERNAME}:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DATABASE}" +USE_REDIS_CLUSTER = getenv_boolean("USE_REDIS_CLUSTER", default=False) REDIS_USE_SSL = os.getenv("REDIS_USE_SSL") REDIS_SSL_CONFIG = {} From e39baa6bbe018a6a518a64663e29133ef0ef37c6 Mon Sep 17 00:00:00 2001 From: Matias Bordese Date: Mon, 4 Dec 2023 15:02:58 -0300 Subject: [PATCH 08/14] Revert "Refactor gcom api calls when syncing org" (#3498) Reverts grafana/oncall#3489 Reviewing logs, it seems something broke related to [token auth](https://ops.grafana-ops.net/explore?schemaVersion=1&panes=%7B%22ffS%22:%7B%22datasource%22:%22OP27Xzxnk%22,%22queries%22:%5B%7B%22refId%22:%22A%22,%22expr%22:%22%7Bcluster%3D~%5C%22dev-.%2A%5C%22,%20namespace%3D%5C%22grafana-com%5C%22,%20job%3D%5C%22grafana-com%2Fgrafana-com-api%5C%22%7D%20%7C~%20%5C%22%2Finstances%2F%5Ba-z0-9%5D%2B.config%3Dtrue%5C%22%20%7C%3D%20%5C%22Grafana%20OnCall%5C%22%22,%22editorMode%22:%22code%22,%22queryType%22:%22range%22,%22datasource%22:%7B%22type%22:%22loki%22,%22uid%22:%22OP27Xzxnk%22%7D%7D%5D,%22range%22:%7B%22from%22:%22now-1h%22,%22to%22:%22now%22%7D%7D%7D&orgId=1). Reverting for now, will revisit in a later PR. 
--- engine/apps/grafana_plugin/helpers/client.py | 6 ++- .../tests/test_gcom_api_client.py | 13 +++++-- engine/apps/user_management/sync.py | 29 +++++++++------ .../apps/user_management/tests/test_sync.py | 37 ++++++------------- 4 files changed, 43 insertions(+), 42 deletions(-) diff --git a/engine/apps/grafana_plugin/helpers/client.py b/engine/apps/grafana_plugin/helpers/client.py index 671386b1..d9e7faaa 100644 --- a/engine/apps/grafana_plugin/helpers/client.py +++ b/engine/apps/grafana_plugin/helpers/client.py @@ -371,7 +371,11 @@ class GcomAPIClient(APIClient): or feature_enabled_via_enable_key_comma_delimited ) - def is_rbac_enabled_for_instance(self, instance_info: GCOMInstanceInfo = None) -> bool: + def is_rbac_enabled_for_stack(self, stack_id: str) -> bool: + """ + NOTE: must use an "Admin" GCOM token when calling this method + """ + instance_info = self.get_instance_info(stack_id, True) if not instance_info: return False return self._feature_toggle_is_enabled(instance_info, "accessControlOnCall") diff --git a/engine/apps/grafana_plugin/tests/test_gcom_api_client.py b/engine/apps/grafana_plugin/tests/test_gcom_api_client.py index 40d5b836..a8c3f67d 100644 --- a/engine/apps/grafana_plugin/tests/test_gcom_api_client.py +++ b/engine/apps/grafana_plugin/tests/test_gcom_api_client.py @@ -12,7 +12,7 @@ class TestIsRbacEnabledForStack: TEST_FEATURE_TOGGLE = "helloWorld" @pytest.mark.parametrize( - "instance_info,expected", + "gcom_api_response,expected", [ (None, False), ({}, False), @@ -22,9 +22,16 @@ class TestIsRbacEnabledForStack: ({"config": {"feature_toggles": {"accessControlOnCall": "true"}}}, True), ], ) - def test_it_returns_based_on_feature_toggle_value(self, instance_info, expected): + @patch("apps.grafana_plugin.helpers.client.GcomAPIClient.api_get") + def test_it_returns_based_on_feature_toggle_value( + self, mocked_gcom_api_client_api_get, gcom_api_response, expected + ): + stack_id = 5 + mocked_gcom_api_client_api_get.return_value = 
(gcom_api_response, {"status_code": 200}) + api_client = GcomAPIClient("someFakeApiToken") - assert api_client.is_rbac_enabled_for_instance(instance_info) == expected + assert api_client.is_rbac_enabled_for_stack(stack_id) == expected + assert mocked_gcom_api_client_api_get.called_once_with(f"instances/{stack_id}?config=true") @pytest.mark.parametrize( "instance_info_feature_toggles,delimiter,expected", diff --git a/engine/apps/user_management/sync.py b/engine/apps/user_management/sync.py index e95245b6..ebbcf6fd 100644 --- a/engine/apps/user_management/sync.py +++ b/engine/apps/user_management/sync.py @@ -13,9 +13,24 @@ logger.setLevel(logging.DEBUG) def sync_organization(organization: Organization) -> None: + grafana_api_client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token) + + # NOTE: checking whether or not RBAC is enabled depends on whether we are dealing with an open-source or cloud + # stack. For Cloud we should make a call to the GCOM API, using an admin API token, and get the list of + # feature_toggles enabled for the stack. For open-source, simply make a HEAD request to the grafana instance's API + # and consider RBAC enabled if the list RBAC permissions endpoint returns 200. We cannot simply rely on the HEAD + # call in cloud because if an instance is not active, the grafana gateway will still return 200 for the + # HEAD request. 
+ if settings.LICENSE == settings.CLOUD_LICENSE_NAME: + gcom_client = GcomAPIClient(settings.GRAFANA_COM_ADMIN_API_TOKEN) + rbac_is_enabled = gcom_client.is_rbac_enabled_for_stack(organization.stack_id) + else: + rbac_is_enabled = grafana_api_client.is_rbac_enabled_for_organization() + + organization.is_rbac_permissions_enabled = rbac_is_enabled + _sync_instance_info(organization) - grafana_api_client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token) _, check_token_call_status = grafana_api_client.check_token() if check_token_call_status["connected"]: organization.api_token_status = Organization.API_TOKEN_STATUS_OK @@ -47,7 +62,7 @@ def sync_organization(organization: Organization) -> None: def _sync_instance_info(organization: Organization) -> None: if organization.gcom_token: gcom_client = GcomAPIClient(organization.gcom_token) - instance_info = gcom_client.get_instance_info(organization.stack_id, include_config_query_param=True) + instance_info = gcom_client.get_instance_info(organization.stack_id) if not instance_info or instance_info["orgId"] != organization.org_id: return @@ -58,17 +73,7 @@ def _sync_instance_info(organization: Organization) -> None: organization.region_slug = instance_info["regionSlug"] organization.grafana_url = instance_info["url"] organization.cluster_slug = instance_info["clusterSlug"] - organization.is_rbac_permissions_enabled = gcom_client.is_rbac_enabled_for_instance(instance_info) organization.gcom_token_org_last_time_synced = timezone.now() - else: - # NOTE: checking whether or not RBAC is enabled depends on whether we are dealing with an open-source or cloud - # stack. For Cloud we should make a call to the GCOM API, using an admin API token, and get the list of - # feature_toggles enabled for the stack. For open-source, simply make a HEAD request to the grafana instance's API - # and consider RBAC enabled if the list RBAC permissions endpoint returns 200. 
We cannot simply rely on the HEAD - # call in cloud because if an instance is not active, the grafana gateway will still return 200 for the - # HEAD request. - grafana_api_client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token) - organization.is_rbac_permissions_enabled = grafana_api_client.is_rbac_enabled_for_organization() def sync_users_and_teams(client: GrafanaAPIClient, organization: Organization) -> None: diff --git a/engine/apps/user_management/tests/test_sync.py b/engine/apps/user_management/tests/test_sync.py index 76537cef..86d4881b 100644 --- a/engine/apps/user_management/tests/test_sync.py +++ b/engine/apps/user_management/tests/test_sync.py @@ -312,36 +312,18 @@ def test_sync_organization_is_rbac_permissions_enabled_open_source(make_organiza assert organization.is_rbac_permissions_enabled == grafana_api_response -@patch("apps.user_management.sync.GcomAPIClient.api_get") -@pytest.mark.parametrize( - "instance_info,expected", - [ - ({"config": {"feature_toggles": {}}}, False), - ({"config": {"feature_toggles": {"accessControlOnCall": "false"}}}, False), - ({"config": {"feature_toggles": {"accessControlOnCall": "true"}}}, True), - ], -) +@pytest.mark.parametrize("gcom_api_response", [False, True]) +@patch("apps.user_management.sync.GcomAPIClient") +@override_settings(LICENSE=settings.CLOUD_LICENSE_NAME) +@override_settings(GRAFANA_COM_ADMIN_API_TOKEN="mockedToken") @pytest.mark.django_db -def test_sync_organization_is_rbac_permissions_enabled_cloud( - mocked_gcom_client, make_organization, instance_info, expected -): +def test_sync_organization_is_rbac_permissions_enabled_cloud(mocked_gcom_client, make_organization, gcom_api_response): stack_id = 5 - organization = make_organization(stack_id=stack_id, gcom_token="TEST_GCOM_TOKEN") + organization = make_organization(stack_id=stack_id) api_check_token_call_status = {"connected": True} - instance_info.update( - { - "orgId": organization.org_id, - "slug": 
organization.stack_slug, - "orgSlug": organization.org_slug, - "orgName": organization.org_title, - "regionSlug": organization.region_slug, - "url": organization.grafana_url, - "clusterSlug": organization.cluster_slug, - } - ) - mocked_gcom_client.return_value = (instance_info, {"status_code": 200}) + mocked_gcom_client.return_value.is_rbac_enabled_for_stack.return_value = gcom_api_response api_users_response = ( { @@ -385,7 +367,10 @@ def test_sync_organization_is_rbac_permissions_enabled_cloud( sync_organization(organization) organization.refresh_from_db() - assert organization.is_rbac_permissions_enabled == expected + + assert mocked_gcom_client.return_value.called_once_with("mockedToken") + assert mocked_gcom_client.return_value.is_rbac_enabled_for_stack.called_once_with(stack_id) + assert organization.is_rbac_permissions_enabled == gcom_api_response @pytest.mark.django_db From 4df898528351bc70079785501522b78ad0fabb46 Mon Sep 17 00:00:00 2001 From: jorgeav <54142549+jorgeav@users.noreply.github.com> Date: Tue, 5 Dec 2023 05:39:04 +1100 Subject: [PATCH 09/14] Jinja2 template helper filter datetimeformat_as_timezone (#3426) # What this PR does Add an additional jinja2 template helper filter to convert a timezone aware datetime to a different timezone. ## Which issue(s) this PR fixes Alert payloads that originate from different time zones may include timestamps having a local time offset. This filter enables standardization of timestamp timezones. 
## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --------- Co-authored-by: Joey Orlando --- CHANGELOG.md | 4 ++ docs/sources/jinja2-templating/_index.md | 8 ++- engine/common/jinja_templater/filters.py | 8 +++ .../jinja_templater/jinja_template_env.py | 2 + .../common/tests/test_apply_jinja_template.py | 57 +++++++++++++++++++ engine/common/tests/test_base64decode.py | 7 --- .../CheatSheet/CheatSheet.config.ts | 2 +- 7 files changed, 78 insertions(+), 10 deletions(-) delete mode 100644 engine/common/tests/test_base64decode.py diff --git a/CHANGELOG.md b/CHANGELOG.md index b768aaa7..1efb5afb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +### Added + +- Add `datetimeformat_as_timezone` Jinja2 template helper filter by @jorgeav ([#3426](https://github.com/grafana/oncall/pull/3426)) + ### Changed - Disallow creating and deleting direct paging integrations by @vadimkerr ([#3475](https://github.com/grafana/oncall/pull/3475)) diff --git a/docs/sources/jinja2-templating/_index.md b/docs/sources/jinja2-templating/_index.md index b3e1d311..64d3489c 100644 --- a/docs/sources/jinja2-templating/_index.md +++ b/docs/sources/jinja2-templating/_index.md @@ -205,8 +205,12 @@ Built-in functions: - `tojson_pretty` - same as tojson, but prettified - `iso8601_to_time` - converts time from iso8601 (`2015-02-17T18:30:20.000Z`) to datetime - `datetimeformat` - converts time from datetime to the given format (`%H:%M / %d-%m-%Y` by default) +- `datetimeformat_as_timezone` - same as `datetimeformat`, with the inclusion of timezone conversion (`UTC` by default) + - Usage example: `{{ payload.alerts.startsAt | iso8601_to_time | datetimeformat_as_timezone('%Y-%m-%dT%H:%M:%S%z', 'America/Chicago') 
}}` - `regex_replace` - performs a regex find and replace -- `regex_match` - performs a regex match, returns `True` or `False`. Usage example: `{{ payload.ruleName | regex_match(".*") }}` -- `b64decode` - performs a base64 string decode. Usage example: `{{ payload.data | b64decode }}` +- `regex_match` - performs a regex match, returns `True` or `False` + - Usage example: `{{ payload.ruleName | regex_match(".*") }}` +- `b64decode` - performs a base64 string decode + - Usage example: `{{ payload.data | b64decode }}` {{< section >}} diff --git a/engine/common/jinja_templater/filters.py b/engine/common/jinja_templater/filters.py index e24d01ee..a14a3ad5 100644 --- a/engine/common/jinja_templater/filters.py +++ b/engine/common/jinja_templater/filters.py @@ -3,6 +3,7 @@ import json import re from django.utils.dateparse import parse_datetime +from pytz import timezone def datetimeformat(value, format="%H:%M / %d-%m-%Y"): @@ -12,6 +13,13 @@ def datetimeformat(value, format="%H:%M / %d-%m-%Y"): return None +def datetimeformat_as_timezone(value, format="%H:%M / %d-%m-%Y", tz="UTC"): + try: + return value.astimezone(timezone(tz)).strftime(format) + except (ValueError, AttributeError, TypeError): + return None + + def iso8601_to_time(value): try: return parse_datetime(value) diff --git a/engine/common/jinja_templater/jinja_template_env.py b/engine/common/jinja_templater/jinja_template_env.py index 21c227c7..1941bff8 100644 --- a/engine/common/jinja_templater/jinja_template_env.py +++ b/engine/common/jinja_templater/jinja_template_env.py @@ -6,6 +6,7 @@ from jinja2.sandbox import SandboxedEnvironment from .filters import ( b64decode, datetimeformat, + datetimeformat_as_timezone, iso8601_to_time, json_dumps, regex_match, @@ -22,6 +23,7 @@ def raise_security_exception(name): jinja_template_env = SandboxedEnvironment(loader=BaseLoader()) jinja_template_env.filters["datetimeformat"] = datetimeformat +jinja_template_env.filters["datetimeformat_as_timezone"] = 
datetimeformat_as_timezone jinja_template_env.filters["iso8601_to_time"] = iso8601_to_time jinja_template_env.filters["tojson_pretty"] = to_pretty_json jinja_template_env.globals["time"] = timezone.now diff --git a/engine/common/tests/test_apply_jinja_template.py b/engine/common/tests/test_apply_jinja_template.py index aab3f488..e2c0dc3b 100644 --- a/engine/common/tests/test_apply_jinja_template.py +++ b/engine/common/tests/test_apply_jinja_template.py @@ -1,7 +1,10 @@ +import base64 import json import pytest from django.conf import settings +from django.utils.dateparse import parse_datetime +from pytz import timezone from common.jinja_templater import apply_jinja_template from common.jinja_templater.apply_jinja_template import JinjaTemplateError, JinjaTemplateWarning @@ -14,6 +17,60 @@ def test_apply_jinja_template(): assert payload == result +def test_apply_jinja_template_iso8601_to_time(): + payload = {"name": "2023-11-22T15:30:00.000000000Z"} + + result = apply_jinja_template( + "{{ payload.name | iso8601_to_time }}", + payload, + ) + expected = str(parse_datetime(payload["name"])) + assert result == expected + + +def test_apply_jinja_template_datetimeformat(): + payload = {"aware": "2023-05-28 23:11:12+0000", "naive": "2023-05-28 23:11:12"} + + assert apply_jinja_template( + "{{ payload.aware | iso8601_to_time | datetimeformat('%Y-%m-%dT%H:%M:%S%z') }}", + payload, + ) == parse_datetime(payload["aware"]).strftime("%Y-%m-%dT%H:%M:%S%z") + assert apply_jinja_template( + "{{ payload.naive | iso8601_to_time | datetimeformat('%Y-%m-%dT%H:%M:%S%z') }}", + payload, + ) == parse_datetime(payload["naive"]).strftime("%Y-%m-%dT%H:%M:%S%z") + + +def test_apply_jinja_template_datetimeformat_as_timezone(): + payload = {"aware": "2023-05-28 23:11:12+0000", "naive": "2023-05-28 23:11:12"} + + assert apply_jinja_template( + "{{ payload.aware | iso8601_to_time | datetimeformat_as_timezone('%Y-%m-%dT%H:%M:%S%z', 'America/Chicago') }}", + payload, + ) == 
parse_datetime(payload["aware"]).astimezone(timezone("America/Chicago")).strftime("%Y-%m-%dT%H:%M:%S%z") + assert apply_jinja_template( + "{{ payload.naive | iso8601_to_time | datetimeformat_as_timezone('%Y-%m-%dT%H:%M:%S%z', 'America/Chicago') }}", + payload, + ) == parse_datetime(payload["naive"]).astimezone(timezone("America/Chicago")).strftime("%Y-%m-%dT%H:%M:%S%z") + + with pytest.raises(JinjaTemplateWarning): + apply_jinja_template( + "{{ payload.aware | iso8601_to_time | datetimeformat_as_timezone('%Y-%m-%dT%H:%M:%S%z', 'potato') }}", + payload, + ) + + +def test_apply_jinja_template_b64decode(): + payload = {"name": "SGVsbG8sIHdvcmxkIQ=="} + + assert apply_jinja_template( + "{{ payload.name | b64decode }}", + payload, + ) == base64.b64decode( + payload["name"] + ).decode("utf-8") + + def test_apply_jinja_template_json_dumps(): payload = {"name": "test"} diff --git a/engine/common/tests/test_base64decode.py b/engine/common/tests/test_base64decode.py deleted file mode 100644 index 59fbf666..00000000 --- a/engine/common/tests/test_base64decode.py +++ /dev/null @@ -1,7 +0,0 @@ -from common.jinja_templater.filters import b64decode - - -def test_base64_decode(): - original = "dGVzdCBzdHJpbmch" - expected = "test string!" 
- assert b64decode(original) == expected diff --git a/grafana-plugin/src/components/CheatSheet/CheatSheet.config.ts b/grafana-plugin/src/components/CheatSheet/CheatSheet.config.ts index 506522ae..478f70aa 100644 --- a/grafana-plugin/src/components/CheatSheet/CheatSheet.config.ts +++ b/grafana-plugin/src/components/CheatSheet/CheatSheet.config.ts @@ -80,7 +80,7 @@ export const genericTemplateCheatSheet: CheatSheetInterface = { { listItemName: 'payload - payload of last alert in the group' }, { listItemName: 'web_title, web_mesage, web_image_url - templates from Web' }, { listItemName: 'payload, grafana_oncall_link, grafana_oncall_incident_id, integration_name, source_link' }, - { listItemName: 'time(), datetimeformat, iso8601_to_time' }, + { listItemName: 'time(), datetimeformat, datetimeformat_as_timezone, iso8601_to_time' }, { listItemName: 'to_pretty_json' }, { listItemName: 'regex_replace, regex_match' }, { listItemName: 'b64decode' }, From 332aa8ca82e7a3e3976431a7b973ee165cd09f0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 18:41:32 +0000 Subject: [PATCH 10/14] Bump @adobe/css-tools from 4.3.1 to 4.3.2 in /grafana-plugin (#3478) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@adobe/css-tools](https://github.com/adobe/css-tools) from 4.3.1 to 4.3.2.
Changelog

Sourced from @​adobe/css-tools's changelog.

4.3.2 / 2023-11-28

  • Fix ReDoS vulnerability with a specifically crafted CSS string - CVE-2023-48631
  • Fix Problem parsing with :is() and nested :nth-child() #211
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@adobe/css-tools&package-manager=npm_and_yarn&previous-version=4.3.1&new-version=4.3.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/grafana/oncall/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Joey Orlando --- grafana-plugin/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/grafana-plugin/yarn.lock b/grafana-plugin/yarn.lock index 3aa546d3..324d3585 100644 --- a/grafana-plugin/yarn.lock +++ b/grafana-plugin/yarn.lock @@ -3,9 +3,9 @@ "@adobe/css-tools@^4.0.1": - version "4.3.1" - resolved "https://registry.yarnpkg.com/@adobe/css-tools/-/css-tools-4.3.1.tgz#abfccb8ca78075a2b6187345c26243c1a0842f28" - integrity sha512-/62yikz7NLScCGAAST5SHdnjaDJQBDq0M2muyRTpf2VQhw6StBg2ALiu73zSJQ4fMVLA+0uBhBHAle7Wg+2kSg== + version "4.3.2" + resolved "https://registry.yarnpkg.com/@adobe/css-tools/-/css-tools-4.3.2.tgz#a6abc715fb6884851fca9dad37fc34739a04fd11" + integrity sha512-DA5a1C0gD/pLOvhv33YMrbf2FK3oUzwNl9oOJqE4XVjuEtt6XIakRcsd7eLiOSPkp1kTRQGICTA8cKra/vFbjw== "@ampproject/remapping@^2.1.0": version "2.2.0" From 45200c33a18e848ecbb41e23443321aa9ef4d1e3 Mon Sep 17 00:00:00 2001 From: Matias Bordese Date: Mon, 4 Dec 2023 15:42:12 -0300 Subject: [PATCH 11/14] Update beat schedule to use crontab schedule types (#3497) Update celery beat schedule to use crontab schedule types, since otherwise the timedelta is relative to the celery start and when we have a restart we have some bigger than expected gaps between task runs (alternatively it seems we could also use the `relative` option described [here](https://docs.celeryq.dev/en/main/userguide/periodic-tasks.html#available-fields)) Related to https://github.com/grafana/oncall-private/issues/2347 --- engine/settings/base.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/engine/settings/base.py b/engine/settings/base.py index 9dbc684e..310d0078 100644 --- a/engine/settings/base.py +++ b/engine/settings/base.py @@ -495,7 +495,7 @@ CELERY_BEAT_SCHEDULE = { }, "start_refresh_ical_files": { "task": 
"apps.schedules.tasks.refresh_ical_files.start_refresh_ical_files", - "schedule": 10 * 60, + "schedule": crontab(minute="*/10"), # every 10 minutes "args": (), }, "start_notify_about_gaps_in_schedule": { @@ -545,24 +545,24 @@ CELERY_BEAT_SCHEDULE = { }, "process_failed_to_invoke_celery_tasks": { "task": "apps.base.tasks.process_failed_to_invoke_celery_tasks", - "schedule": 60 * 10, + "schedule": crontab(minute="*/10"), # every 10 minutes "args": (), }, "conditionally_send_going_oncall_push_notifications_for_all_schedules": { "task": "apps.mobile_app.tasks.going_oncall_notification.conditionally_send_going_oncall_push_notifications_for_all_schedules", - "schedule": 10 * 60, + "schedule": crontab(minute="*/10"), # every 10 minutes }, "notify_shift_swap_requests": { "task": "apps.mobile_app.tasks.new_shift_swap_request.notify_shift_swap_requests", - "schedule": getenv_integer("NOTIFY_SHIFT_SWAP_REQUESTS_INTERVAL", default=10 * 60), + "schedule": crontab(minute="*/{}".format(getenv_integer("NOTIFY_SHIFT_SWAP_REQUESTS_INTERVAL", default=10))), }, "send_shift_swap_request_slack_followups": { "task": "apps.schedules.tasks.shift_swaps.slack_followups.send_shift_swap_request_slack_followups", - "schedule": 10 * 60, + "schedule": crontab(minute="*/10"), # every 10 minutes }, "save_organizations_ids_in_cache": { "task": "apps.metrics_exporter.tasks.save_organizations_ids_in_cache", - "schedule": 60 * 30, + "schedule": crontab(minute="*/30"), # every 30 minutes "args": (), }, "check_heartbeats": { @@ -579,7 +579,11 @@ if ESCALATION_AUDITOR_ENABLED: # # ex. 
if the integration is configured to expect a heartbeat every 15 minutes then this value should be set # to something like 13 * 60 (every 13 minutes) - "schedule": getenv_integer("ALERT_GROUP_ESCALATION_AUDITOR_CELERY_TASK_HEARTBEAT_INTERVAL", 13 * 60), + "schedule": crontab( + minute="*/{}".format( + getenv_integer("ALERT_GROUP_ESCALATION_AUDITOR_CELERY_TASK_HEARTBEAT_INTERVAL", default=13) + ) + ), "args": (), } From b73da8e2a42e730878617ff0da13adf549dba8c5 Mon Sep 17 00:00:00 2001 From: Matias Bordese Date: Mon, 4 Dec 2023 16:01:56 -0300 Subject: [PATCH 12/14] Minor update to schedules API docs (#3488) --- docs/sources/oncall-api-reference/schedules.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/oncall-api-reference/schedules.md b/docs/sources/oncall-api-reference/schedules.md index 9e89f422..228c8f07 100644 --- a/docs/sources/oncall-api-reference/schedules.md +++ b/docs/sources/oncall-api-reference/schedules.md @@ -42,9 +42,9 @@ The above command returns JSON structured in the following way: | Parameter | Unique | Required | Description | | -------------------- | :----: | :--------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `name` | Yes | Yes | Schedule name. | -| `type` | No | Yes | Schedule type. May be `ical` (used for iCalendar integration) or `calendar` (used for manually created on-call shifts). | +| `type` | No | Yes | Schedule type. May be `ical` (used for iCalendar integration), `calendar` (used for manually created on-call shifts) or `web` (for web UI managed schedules). | | `team_id` | No | No | ID of the team. | -| `time_zone` | No | Optional | Schedule time zone. Is used for manually added on-call shifts in Schedules with type `calendar`. Default time zone is `UTC`. 
For more information about time zones, see [time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). | +| `time_zone` | No | Yes | Schedule time zone. It is used for manually added on-call shifts in Schedules with type `calendar`. Default time zone is `UTC`. For more information about time zones, see [time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). Not used for schedules with type `ical`. | | `ical_url_primary` | No | If type = `ical` | URL of external iCal calendar for schedule with type `ical`. | | `ical_url_overrides` | No | Optional | URL of external iCal calendar for schedule with any type. Events from this calendar override events from primary calendar or from on-call shifts. | | `enable_web_overrides` | No | Optional | Whether to enable web overrides or not. Setting specific for API/Terraform based schedules (`calendar` type). | From 6c3f236db01fffe312ab88ee9d02622195681ad2 Mon Sep 17 00:00:00 2001 From: Dominik Broj Date: Tue, 5 Dec 2023 11:23:35 +0100 Subject: [PATCH 13/14] add borders to editors (#3490) # What this PR does add missing borders to template editors ## Which issue(s) this PR fixes https://github.com/grafana/oncall/issues/3463 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- .../IntegrationTemplate/IntegrationTemplate.module.scss | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/grafana-plugin/src/containers/IntegrationTemplate/IntegrationTemplate.module.scss b/grafana-plugin/src/containers/IntegrationTemplate/IntegrationTemplate.module.scss index 8bcab258..8aab1d4c 100644 --- a/grafana-plugin/src/containers/IntegrationTemplate/IntegrationTemplate.module.scss +++ b/grafana-plugin/src/containers/IntegrationTemplate/IntegrationTemplate.module.scss @@ -67,5 +67,7 @@ } 
.template-editor-block-content { - height: calc(100% - 60px); + height: calc(100% - 57px); + border-left: var(--border-weak); + border-right: var(--border-weak); } From 61f0c177e84899b02aea1bccf671efe71a379475 Mon Sep 17 00:00:00 2001 From: Joey Orlando Date: Tue, 5 Dec 2023 07:50:51 -0500 Subject: [PATCH 14/14] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1efb5afb..7c712805 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +## v1.3.71 (2023-12-05) + ### Added - Add `datetimeformat_as_timezone` Jinja2 template helper filter by @jorgeav ([#3426](https://github.com/grafana/oncall/pull/3426))