From 92fa509d22185a8a9682d5755417294f6ecbf354 Mon Sep 17 00:00:00 2001 From: Dominik Broj Date: Fri, 15 Dec 2023 09:58:25 +0100 Subject: [PATCH 01/16] Brojd/improve e2e tests dx (#3516) # What this PR does - introduce e2e tests in Tilt - support e2e tests commands in Makefile - stabilize local setup ## Which issue(s) this PR fixes https://github.com/grafana/oncall/issues/3492 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- .gitignore | 2 + .prettierrc.js | 8 +++ CHANGELOG.md | 6 +++ Makefile | 9 ++++ Tiltfile | 51 ++++++++++++++++++- dev/README.md | 19 ++++--- dev/helm-local.yml | 2 +- grafana-plugin/.dockerignore | 1 - grafana-plugin/.gitignore | 1 - grafana-plugin/e2e-tests/.env.example | 2 - .../e2e-tests/alerts/onCallSchedule.test.ts | 3 -- .../escalationChains/escalationPolicy.test.ts | 10 ++-- grafana-plugin/e2e-tests/globalSetup.ts | 37 +++++++------- .../e2e-tests/integrations/heartbeat.test.ts | 12 +++-- .../integrations/integrationsTable.test.ts | 9 +--- .../integrations/maintenanceMode.test.ts | 4 +- grafana-plugin/e2e-tests/utils/constants.ts | 1 - grafana-plugin/e2e-tests/utils/forms.ts | 2 +- .../e2e-tests/utils/integrations.ts | 28 ++++++---- grafana-plugin/e2e-tests/utils/navigation.ts | 12 +++-- grafana-plugin/playwright.config.ts | 27 ++++++---- .../components/TooltipBadge/TooltipBadge.tsx | 5 +- .../PluginConfigPage/PluginConfigPage.tsx | 20 +++----- .../src/pages/integration/Integration.tsx | 5 +- .../src/pages/integrations/Integrations.tsx | 1 + .../src/state/rootBaseStore/index.ts | 9 ++-- .../state/rootBaseStore/rootBaseStore.test.ts | 23 +-------- grafana-plugin/src/utils/consts.ts | 9 ++++ 28 files changed, 193 insertions(+), 125 deletions(-) create mode 100644 .prettierrc.js diff --git a/.gitignore b/.gitignore index 7c681d30..ce15bec1 100644 --- 
a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ venv yarn.lock node_modules + +test-results \ No newline at end of file diff --git a/.prettierrc.js b/.prettierrc.js new file mode 100644 index 00000000..eba28f8d --- /dev/null +++ b/.prettierrc.js @@ -0,0 +1,8 @@ +overrides: [ + { + files: ["*.yml", "*.yaml"], + options: { + singleQuote: false, + }, + }, +]; diff --git a/CHANGELOG.md b/CHANGELOG.md index e4926cd0..3a62c032 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## Unreleased + +### Added + +- Support e2e tests in Tilt and Makefile ([#3516](https://github.com/grafana/oncall/pull/3516)) + ## v1.3.80 (2023-12-14) ### Added diff --git a/Makefile b/Makefile index e4092de6..236d9910 100644 --- a/Makefile +++ b/Makefile @@ -197,6 +197,15 @@ engine-manage: ## run Django's `manage.py` script, inside of a docker container ## https://docs.djangoproject.com/en/4.1/ref/django-admin/#django-admin-makemigrations $(call run_engine_docker_command,python manage.py $(CMD)) +test-e2e: ## run the e2e tests in headless mode + yarn --cwd grafana-plugin test:e2e + +test-e2e-watch: ## start e2e tests in watch mode + yarn --cwd grafana-plugin test:e2e:watch + +test-e2e-show-report: ## open last e2e test report + yarn --cwd grafana-plugin playwright show-report + ui-test: ## run the UI tests $(call run_ui_docker_command,yarn test) diff --git a/Tiltfile b/Tiltfile index 2b8a79f9..08372752 100644 --- a/Tiltfile +++ b/Tiltfile @@ -1,3 +1,4 @@ +load('ext://uibutton', 'cmd_button', 'location', 'text_input', 'bool_input') running_under_parent_tiltfile = os.getenv("TILT_PARENT", "false") == "true" # The user/pass that you will login to Grafana with grafana_admin_user_pass = os.getenv("GRAFANA_ADMIN_USER_PASS", "oncall") @@ -36,7 +37,7 @@ 
docker_build_sub( "localhost:63628/oncall/engine:dev", context="./engine", cache_from=["grafana/oncall:latest", "grafana/oncall:dev"], - ignore=["./grafana-plugin/test-results/", "./grafana-plugin/dist/", "./grafana-plugin/e2e-tests/"], + ignore=["./test-results/", "./grafana-plugin/dist/", "./grafana-plugin/e2e-tests/"], child_context=".", target="dev", extra_cmds=["ADD ./grafana-plugin/src/plugin.json /etc/grafana-plugin/src/plugin.json"], @@ -54,10 +55,56 @@ local_resource( "build-ui", labels=["OnCallUI"], cmd="cd grafana-plugin && yarn install && yarn build:dev", - serve_cmd="cd grafana-plugin && ONCALL_API_URL=http://oncall-dev-engine:8080 yarn watch", + serve_cmd="cd grafana-plugin && yarn watch", allow_parallel=True, ) +local_resource( + "e2e-tests", + labels=["E2eTests"], + cmd="cd grafana-plugin && yarn test:e2e", + trigger_mode=TRIGGER_MODE_MANUAL, + auto_init=False, + resource_deps=["build-ui", "grafana", "grafana-oncall-app-provisioning-configmap", "engine"] +) + +cmd_button( + name="E2E Tests - headless run", + argv=["sh", "-c", "yarn --cwd ./grafana-plugin test:e2e $STOP_ON_FIRST_FAILURE"], + text="Restart headless run", + resource="e2e-tests", + icon_name="replay", + inputs=[ + text_input("BROWSERS", "Browsers (e.g. 
\"chromium,firefox,webkit\")", "chromium", "chromium,firefox,webkit"), + bool_input("REPORTER", "Use HTML reporter", True, 'html', 'line'), + bool_input("STOP_ON_FIRST_FAILURE", "Stop on first failure", True, "-x", ""), + ] +) + +cmd_button( + name="E2E Tests - open watch mode", + argv=["sh", "-c", "yarn --cwd grafana-plugin test:e2e:watch"], + text="Open watch mode", + resource="e2e-tests", + icon_name="visibility", +) + +cmd_button( + name="E2E Tests - show report", + argv=["sh", "-c", "yarn --cwd grafana-plugin playwright show-report"], + text="Show last HTML report", + resource="e2e-tests", + icon_name="assignment", +) + +cmd_button( + name="E2E Tests - stop current run", + argv=["sh", "-c", "kill -9 $(pgrep -f test:e2e)"], + text="Stop", + resource="e2e-tests", + icon_name="dangerous", +) + yaml = helm("helm/oncall", name=HELM_PREFIX, values=["./dev/helm-local.yml", "./dev/helm-local.dev.yml"]) k8s_yaml(yaml) diff --git a/dev/README.md b/dev/README.md index 7450a7ab..2bd4bf94 100644 --- a/dev/README.md +++ b/dev/README.md @@ -243,13 +243,18 @@ are run on pull request CI builds. New features should ideally include a new/mod To run these tests locally simply do the following: -```bash -npx playwright install # install playwright dependencies -cp ./grafana-plugin/e2e-tests/.env.example ./grafana-plugin/e2e-tests/.env -# you may need to tweak the values in ./grafana-plugin/.env according to your local setup -cd grafana-plugin -yarn test:e2e -``` +1. Install Playwright dependencies with `npx playwright install` +2. [Launch the environment](#launch-the-environment) +3. Then you interact with tests in 2 different ways: + 1. Using `Tilt` - open _E2eTests_ section where you will find 4 buttons: + 1. Restart headless run (you can configure browsers, reporter and failure allowance there) + 2. Open watch mode + 3. Show last HTML report + 4. Stop (stops any pending e2e test process) + 2. Using `make`: + 1. `make test:e2e` to start headless run + 2. 
`make test:e2e:watch` to open watch mode + 3. `make test:e2e:show:report` to open last HTML report ## Helm unit tests diff --git a/dev/helm-local.yml b/dev/helm-local.yml index af62b51a..68674c37 100644 --- a/dev/helm-local.yml +++ b/dev/helm-local.yml @@ -1,4 +1,4 @@ -base_url: localhost:30001 +base_url: localhost:8080 base_url_protocol: http env: - name: GRAFANA_CLOUD_NOTIFICATIONS_ENABLED diff --git a/grafana-plugin/.dockerignore b/grafana-plugin/.dockerignore index beb901bf..5ba54898 100644 --- a/grafana-plugin/.dockerignore +++ b/grafana-plugin/.dockerignore @@ -1,5 +1,4 @@ node_modules frontend_enterprise .DS_Store -test-results playwright-report diff --git a/grafana-plugin/.gitignore b/grafana-plugin/.gitignore index ab331585..e768d7d7 100644 --- a/grafana-plugin/.gitignore +++ b/grafana-plugin/.gitignore @@ -16,7 +16,6 @@ grafana-plugin.yml frontend_enterprise # playwright -/test-results/ /playwright-report/ /playwright/.cache/ /e2e-tests/storageState.json diff --git a/grafana-plugin/e2e-tests/.env.example b/grafana-plugin/e2e-tests/.env.example index 497ade60..9d3195e7 100644 --- a/grafana-plugin/e2e-tests/.env.example +++ b/grafana-plugin/e2e-tests/.env.example @@ -1,5 +1,3 @@ -BASE_URL=http://localhost:30002/grafana -ONCALL_API_URL=http://oncall-dev-engine-external:8080/ GRAFANA_VIEWER_USERNAME=viewer GRAFANA_VIEWER_PASSWORD=viewer GRAFANA_EDITOR_USERNAME=editor diff --git a/grafana-plugin/e2e-tests/alerts/onCallSchedule.test.ts b/grafana-plugin/e2e-tests/alerts/onCallSchedule.test.ts index 7cb7686d..6859ec03 100644 --- a/grafana-plugin/e2e-tests/alerts/onCallSchedule.test.ts +++ b/grafana-plugin/e2e-tests/alerts/onCallSchedule.test.ts @@ -6,9 +6,6 @@ import { createIntegrationAndSendDemoAlert } from '../utils/integrations'; import { createOnCallSchedule } from '../utils/schedule'; test('we can create an oncall schedule + receive an alert', async ({ adminRolePage }) => { - // this test does a lot of stuff, lets give it adequate time to do its thing - 
test.slow(); - const { page, userName } = adminRolePage; const escalationChainName = generateRandomValue(); const integrationName = generateRandomValue(); diff --git a/grafana-plugin/e2e-tests/escalationChains/escalationPolicy.test.ts b/grafana-plugin/e2e-tests/escalationChains/escalationPolicy.test.ts index cf9126ba..70b10b3e 100644 --- a/grafana-plugin/e2e-tests/escalationChains/escalationPolicy.test.ts +++ b/grafana-plugin/e2e-tests/escalationChains/escalationPolicy.test.ts @@ -1,6 +1,6 @@ -import {expect, test} from "../fixtures"; -import {createEscalationChain, EscalationStep, selectEscalationStepValue} from "../utils/escalationChain"; -import {generateRandomValue} from "../utils/forms"; +import { expect, test } from '../fixtures'; +import { createEscalationChain, EscalationStep, selectEscalationStepValue } from '../utils/escalationChain'; +import { generateRandomValue } from '../utils/forms'; test('escalation policy does not go back to "Default" after adding users to notify', async ({ adminRolePage }) => { const { page, userName } = adminRolePage; @@ -13,7 +13,5 @@ test('escalation policy does not go back to "Default" after adding users to noti // reload and check if important is still selected await page.reload(); - await page.waitForLoadState('networkidle'); - - expect(await page.locator('text=Important').isVisible()).toBe(true); + await expect(page.getByText('Important')).toBeVisible(); }); diff --git a/grafana-plugin/e2e-tests/globalSetup.ts b/grafana-plugin/e2e-tests/globalSetup.ts index 835fc247..41752b78 100644 --- a/grafana-plugin/e2e-tests/globalSetup.ts +++ b/grafana-plugin/e2e-tests/globalSetup.ts @@ -1,6 +1,8 @@ import { OrgRole } from '@grafana/data'; import { test as setup, chromium, expect, Page, BrowserContext, FullConfig, APIRequestContext } from '@playwright/test'; +import { getOnCallApiUrl } from 'utils/consts'; + import { VIEWER_USER_STORAGE_STATE, EDITOR_USER_STORAGE_STATE, ADMIN_USER_STORAGE_STATE } from '../playwright.config'; import 
GrafanaAPIClient from './utils/clients/grafana'; @@ -13,7 +15,6 @@ import { GRAFANA_VIEWER_USERNAME, IS_CLOUD, IS_OPEN_SOURCE, - ONCALL_API_URL, } from './utils/constants'; import { clickButton, getInputByName } from './utils/forms'; import { goToGrafanaPage } from './utils/navigation'; @@ -59,17 +60,26 @@ const configureOnCallPlugin = async (page: Page): Promise => { * go to the oncall plugin configuration page and wait for the page to be loaded */ await goToGrafanaPage(page, '/plugins/grafana-oncall-app'); - await page.waitForSelector('text=Configure Grafana OnCall'); + await page.waitForTimeout(2000); - /** - * we may need to fill in the OnCall API URL if it is not set in the process.env - * of the frontend build - */ - const onCallApiUrlInput = getInputByName(page, 'onCallApiUrl'); - const pluginIsAutoConfigured = (await onCallApiUrlInput.count()) === 0; + // if plugin is configured, go to OnCall + const isConfigured = (await page.getByText('Connected to OnCall').count()) >= 1; + if (isConfigured) { + await page.getByRole('link', { name: 'Open Grafana OnCall' }).click(); + return; + } - if (!pluginIsAutoConfigured) { - await onCallApiUrlInput.fill(ONCALL_API_URL); + // otherwise we may need to reconfigure the plugin + const needToReconfigure = (await page.getByText('try removing your plugin configuration').count()) >= 1; + if (needToReconfigure) { + await clickButton({ page, buttonText: 'Remove current configuration' }); + await clickButton({ page, buttonText: /^Remove$/ }); + } + await page.waitForTimeout(2000); + + const needToEnterOnCallApiUrl = await page.getByText(/Connected to OnCall/).isHidden(); + if (needToEnterOnCallApiUrl) { + await getInputByName(page, 'onCallApiUrl').fill(getOnCallApiUrl() || 'http://oncall-dev-engine:8080'); await clickButton({ page, buttonText: 'Connect' }); } @@ -88,13 +98,6 @@ const configureOnCallPlugin = async (page: Page): Promise => { * https://github.com/grafana/incident/blob/main/plugin/e2e/global-setup.ts */ 
setup('Configure Grafana OnCall plugin', async ({ request }, { config }) => { - /** - * Unconditionally marks the setup as "slow", giving it triple the default timeout. - * This is mostly useful for the rare case for Cloud Grafana instances where the instance may be down/unavailable - * and we need to poll it until it is available - */ - setup.slow(); - if (IS_CLOUD) { await grafanaApiClient.pollInstanceUntilItIsHealthy(request); } diff --git a/grafana-plugin/e2e-tests/integrations/heartbeat.test.ts b/grafana-plugin/e2e-tests/integrations/heartbeat.test.ts index 2ba7e16e..4b5737ba 100644 --- a/grafana-plugin/e2e-tests/integrations/heartbeat.test.ts +++ b/grafana-plugin/e2e-tests/integrations/heartbeat.test.ts @@ -1,6 +1,7 @@ import { test, Page, expect } from '../fixtures'; import { generateRandomValue, selectDropdownValue } from '../utils/forms'; -import { createIntegration } from '../utils/integrations'; +import { createIntegration, searchIntegrationAndAssertItsPresence } from '../utils/integrations'; +import { goToOnCallPage } from '../utils/navigation'; const HEARTBEAT_SETTINGS_FORM_TEST_ID = 'heartbeat-settings-form'; @@ -12,7 +13,8 @@ test.describe("updating an integration's heartbeat interval works", async () => }; test('change heartbeat interval', async ({ adminRolePage: { page } }) => { - await createIntegration({ page, integrationName: generateRandomValue() }); + const integrationName = generateRandomValue(); + await createIntegration({ page, integrationName }); await _openHeartbeatSettingsForm(page); @@ -42,7 +44,8 @@ test.describe("updating an integration's heartbeat interval works", async () => }); test('send heartbeat', async ({ adminRolePage: { page } }) => { - await createIntegration({ page, integrationName: generateRandomValue() }); + const integrationName = generateRandomValue(); + await createIntegration({ page, integrationName }); await _openHeartbeatSettingsForm(page); @@ -59,6 +62,9 @@ test.describe("updating an integration's heartbeat 
interval works", async () => */ await page.request.get(endpoint); await page.reload({ waitUntil: 'networkidle' }); + + await goToOnCallPage(page, 'integrations'); + await searchIntegrationAndAssertItsPresence({ page, integrationName }); await page.getByTestId('heartbeat-badge').waitFor(); }); }); diff --git a/grafana-plugin/e2e-tests/integrations/integrationsTable.test.ts b/grafana-plugin/e2e-tests/integrations/integrationsTable.test.ts index 28b1a3b9..45b1f7b7 100644 --- a/grafana-plugin/e2e-tests/integrations/integrationsTable.test.ts +++ b/grafana-plugin/e2e-tests/integrations/integrationsTable.test.ts @@ -17,7 +17,6 @@ test('Integrations table shows data in Monitoring Systems and Direct Paging tabs await createIntegration({ page, integrationSearchText: 'Alertmanager', - shouldGoToIntegrationsPage: false, integrationName: ALERTMANAGER_INTEGRATION_NAME, }); await page.waitForTimeout(1000); @@ -32,7 +31,6 @@ test('Integrations table shows data in Monitoring Systems and Direct Paging tabs await createIntegration({ page, integrationSearchText: 'Direct paging', - shouldGoToIntegrationsPage: false, integrationName: DIRECT_PAGING_INTEGRATION_NAME, }); await page.waitForTimeout(1000); @@ -40,15 +38,13 @@ test('Integrations table shows data in Monitoring Systems and Direct Paging tabs await page.getByRole('tab', { name: 'Tab Integrations' }).click(); // By default Monitoring Systems tab is opened and newly created integrations are visible except Direct Paging one - await searchIntegrationAndAssertItsPresence({ page, integrationsTable, integrationName: WEBHOOK_INTEGRATION_NAME }); + await searchIntegrationAndAssertItsPresence({ page, integrationName: WEBHOOK_INTEGRATION_NAME }); await searchIntegrationAndAssertItsPresence({ page, - integrationsTable, integrationName: ALERTMANAGER_INTEGRATION_NAME, }); await searchIntegrationAndAssertItsPresence({ page, - integrationsTable, integrationName: DIRECT_PAGING_INTEGRATION_NAME, visibleExpected: false, }); @@ -57,19 +53,16 @@ 
test('Integrations table shows data in Monitoring Systems and Direct Paging tabs await page.getByRole('tab', { name: 'Tab Manual Direct Paging' }).click(); await searchIntegrationAndAssertItsPresence({ page, - integrationsTable, integrationName: WEBHOOK_INTEGRATION_NAME, visibleExpected: false, }); await searchIntegrationAndAssertItsPresence({ page, - integrationsTable, integrationName: ALERTMANAGER_INTEGRATION_NAME, visibleExpected: false, }); await searchIntegrationAndAssertItsPresence({ page, - integrationsTable, integrationName: 'Direct paging', }); }); diff --git a/grafana-plugin/e2e-tests/integrations/maintenanceMode.test.ts b/grafana-plugin/e2e-tests/integrations/maintenanceMode.test.ts index f3471840..d1f61be7 100644 --- a/grafana-plugin/e2e-tests/integrations/maintenanceMode.test.ts +++ b/grafana-plugin/e2e-tests/integrations/maintenanceMode.test.ts @@ -103,6 +103,7 @@ test.describe('maintenance mode works', () => { await createEscalationChain(page, escalationChainName, EscalationStep.NotifyUsers, userName); await createIntegration({ page, integrationName }); + await page.waitForTimeout(1000); await assignEscalationChainToIntegration(page, escalationChainName); await enableMaintenanceMode(page, maintenanceModeType); @@ -110,8 +111,6 @@ test.describe('maintenance mode works', () => { }; test('debug mode', async ({ adminRolePage: { page, userName } }) => { - test.slow(); - const { escalationChainName, integrationName } = await createIntegrationAndEscalationChainAndEnableMaintenanceMode( page, userName, @@ -128,7 +127,6 @@ test.describe('maintenance mode works', () => { }); test('"maintenance" mode', async ({ adminRolePage: { page, userName } }) => { - test.slow(); const { integrationName } = await createIntegrationAndEscalationChainAndEnableMaintenanceMode( page, userName, diff --git a/grafana-plugin/e2e-tests/utils/constants.ts b/grafana-plugin/e2e-tests/utils/constants.ts index 97fcd3b7..f6969efd 100644 --- a/grafana-plugin/e2e-tests/utils/constants.ts +++ 
b/grafana-plugin/e2e-tests/utils/constants.ts @@ -1,5 +1,4 @@ export const BASE_URL = process.env.BASE_URL || 'http://localhost:3000'; -export const ONCALL_API_URL = process.env.ONCALL_API_URL || 'http://host.docker.internal:8080'; export const MAILSLURP_API_KEY = process.env.MAILSLURP_API_KEY; export const GRAFANA_VIEWER_USERNAME = process.env.GRAFANA_VIEWER_USERNAME || 'viewer'; diff --git a/grafana-plugin/e2e-tests/utils/forms.ts b/grafana-plugin/e2e-tests/utils/forms.ts index 73c9734e..82aa4d8e 100644 --- a/grafana-plugin/e2e-tests/utils/forms.ts +++ b/grafana-plugin/e2e-tests/utils/forms.ts @@ -22,7 +22,7 @@ type SelectDropdownValueArgs = { type ClickButtonArgs = { page: Page; - buttonText: string; + buttonText: string | RegExp; // if provided, use this Locator as the root of our search for the button startingLocator?: Locator; }; diff --git a/grafana-plugin/e2e-tests/utils/integrations.ts b/grafana-plugin/e2e-tests/utils/integrations.ts index 72ef8ec7..68884d44 100644 --- a/grafana-plugin/e2e-tests/utils/integrations.ts +++ b/grafana-plugin/e2e-tests/utils/integrations.ts @@ -1,4 +1,4 @@ -import { Locator, Page, expect } from '@playwright/test'; +import { Page, expect } from '@playwright/test'; import { clickButton, generateRandomValue, selectDropdownValue } from './forms'; import { goToOnCallPage } from './navigation'; @@ -38,17 +38,24 @@ export const createIntegration = async ({ .click(); // fill in the required inputs - (await page.waitForSelector('input[name="verbal_name"]', { state: 'attached' })).fill(integrationName); - (await page.waitForSelector('textarea[name="description_short"]', { state: 'attached' })).fill( - 'Here goes your integration description' - ); + await page.getByPlaceholder('Integration Name').fill(integrationName); + await page.getByPlaceholder('Integration Description').fill('Here goes your integration description'); + await page.getByTestId('update-integration-button').focus(); + await 
page.getByTestId('update-integration-button').click(); - const grafanaUpdateBtn = page.getByTestId('update-integration-button'); - await grafanaUpdateBtn.click(); + await goToOnCallPage(page, 'integrations'); + await searchIntegrationAndAssertItsPresence({ page, integrationName }); + + await page.getByRole('link', { name: integrationName }).click(); }; export const assignEscalationChainToIntegration = async (page: Page, escalationChainName: string): Promise => { - await page.getByTestId('integration-escalation-chain-not-selected').click(); + const notSelected = page.getByTestId('integration-escalation-chain-not-selected'); + if (await notSelected.isHidden()) { + await clickButton({ page, buttonText: 'Add route' }); + await page.waitForTimeout(500); + } + await notSelected.last().click(); // assign the escalation chain to the integration await selectDropdownValue({ @@ -56,7 +63,7 @@ export const assignEscalationChainToIntegration = async (page: Page, escalationC selectType: 'grafanaSelect', placeholderText: 'Select Escalation Chain', value: escalationChainName, - startingLocator: page.getByTestId('escalation-chain-select'), + startingLocator: page.getByTestId('escalation-chain-select').last(), }); }; @@ -92,11 +99,9 @@ export const filterIntegrationsTableAndGoToDetailPage = async (page: Page, integ export const searchIntegrationAndAssertItsPresence = async ({ page, integrationName, - integrationsTable, visibleExpected = true, }: { page: Page; - integrationsTable: Locator; integrationName: string; visibleExpected?: boolean; }) => { @@ -105,6 +110,7 @@ export const searchIntegrationAndAssertItsPresence = async ({ .filter({ hasText: /^Search or filter results\.\.\.$/ }) .nth(1) .click(); + const integrationsTable = page.getByTestId('integrations-table'); await page.keyboard.insertText(integrationName); await page.keyboard.press('Enter'); await page.waitForTimeout(2000); diff --git a/grafana-plugin/e2e-tests/utils/navigation.ts 
b/grafana-plugin/e2e-tests/utils/navigation.ts index b5a1e4f7..d7ac6fa8 100644 --- a/grafana-plugin/e2e-tests/utils/navigation.ts +++ b/grafana-plugin/e2e-tests/utils/navigation.ts @@ -1,13 +1,15 @@ -import type { Page, Response } from '@playwright/test'; +import type { Page } from '@playwright/test'; import { BASE_URL } from './constants'; type GrafanaPage = '/plugins/grafana-oncall-app'; type OnCallPage = 'alert-groups' | 'integrations' | 'escalations' | 'schedules' | 'users'; -const _goToPage = (page: Page, url = ''): Promise => page.goto(`${BASE_URL}${url}`); +const _goToPage = async (page: Page, url = '') => page.goto(`${BASE_URL}${url}`); -export const goToGrafanaPage = (page: Page, url: GrafanaPage): Promise => _goToPage(page, url); +export const goToGrafanaPage = async (page: Page, url: GrafanaPage) => _goToPage(page, url); -export const goToOnCallPage = (page: Page, onCallPage: OnCallPage): Promise => - _goToPage(page, `/a/grafana-oncall-app/${onCallPage}`); +export const goToOnCallPage = async (page: Page, onCallPage: OnCallPage) => { + await _goToPage(page, `/a/grafana-oncall-app/${onCallPage}`); + await page.waitForTimeout(1000); +}; diff --git a/grafana-plugin/playwright.config.ts b/grafana-plugin/playwright.config.ts index 835639e5..c6721cf8 100644 --- a/grafana-plugin/playwright.config.ts +++ b/grafana-plugin/playwright.config.ts @@ -1,4 +1,4 @@ -import { PlaywrightTestProject, defineConfig, devices } from '@playwright/test'; +import { PlaywrightTestProject, defineConfig, devices, PlaywrightTestConfig } from '@playwright/test'; import path from 'path'; /** @@ -12,7 +12,11 @@ export const EDITOR_USER_STORAGE_STATE = path.join(__dirname, 'e2e-tests/.auth/e export const ADMIN_USER_STORAGE_STATE = path.join(__dirname, 'e2e-tests/.auth/admin.json'); const IS_CI = !!process.env.CI; -const BROWSERS = process.env.BROWSERS || 'chromium firefox webkit'; +const BROWSERS = process.env.BROWSERS || 'chromium'; +const REPORTER_WITH_DEFAULT = process.env.REPORTER || 
'html'; +const REPORTER = ( + process.env.REPORTER === 'html' ? [['html', { open: 'never' }]] : REPORTER_WITH_DEFAULT +) as PlaywrightTestConfig['reporter']; const SETUP_PROJECT_NAME = 'setup'; const getEnabledBrowsers = (browsers: PlaywrightTestProject[]) => @@ -25,16 +29,18 @@ export default defineConfig({ testDir: './e2e-tests', /* Maximum time all the tests can run for. */ - globalTimeout: 20 * 60 * 1000, // 20 minutes + globalTimeout: 20 * 60 * 1_000, // 20 minutes + + reporter: REPORTER, /* Maximum time one test can run for. */ - timeout: 60 * 1000, + timeout: 60_000, expect: { /** * Maximum time expect() should wait for the condition to be met. * For example in `await expect(locator).toHaveText();` */ - timeout: 10000, + timeout: 6_000, }, /* Run tests in files in parallel */ fullyParallel: false, @@ -46,10 +52,10 @@ export default defineConfig({ * NOTE: until we fix this issue (https://github.com/grafana/oncall/issues/1692) which occasionally leads * to flaky tests.. let's allow 1 retry per test */ - retries: IS_CI ? 1 : 0, + retries: 1, workers: 2, /* Reporter to use. See https://playwright.dev/docs/test-reporters */ - reporter: 'html', + // reporter: 'html', /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */ use: { /* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */ @@ -59,7 +65,7 @@ export default defineConfig({ trace: 'on', video: 'on', - headless: IS_CI, + headless: true, }, /* Configure projects for major browsers. The final list is filtered based on BROWSERS env var */ @@ -109,8 +115,9 @@ export default defineConfig({ // }, ]), - /* Folder for test artifacts such as screenshots, videos, traces, etc. */ - // outputDir: 'test-results/', + /* Folder for test artifacts such as screenshots, videos, traces, etc. 
+ Set outside of grafana-plugin to prevent refreshing Grafana UI during e2e test runs */ + outputDir: '../test-results/', /* Run your local dev server before starting the tests */ // webServer: { diff --git a/grafana-plugin/src/components/TooltipBadge/TooltipBadge.tsx b/grafana-plugin/src/components/TooltipBadge/TooltipBadge.tsx index 99ede4a1..27f7b60a 100644 --- a/grafana-plugin/src/components/TooltipBadge/TooltipBadge.tsx +++ b/grafana-plugin/src/components/TooltipBadge/TooltipBadge.tsx @@ -18,6 +18,7 @@ interface TooltipBadgeProps { customIcon?: React.ReactNode; addPadding?: boolean; placement?; + testId?: string; onHover?: () => void; } @@ -36,11 +37,9 @@ const TooltipBadge: FC = (props) => { icon, customIcon, className, - ...rest + testId, } = props; - const testId = rest['data-testid']; - return ( = ({ plugin: { - meta: { jsonData, enabled: pluginIsEnabled }, + meta, + meta: { enabled: pluginIsEnabled }, }, }) => { const { search } = useLocation(); @@ -75,11 +76,8 @@ const PluginConfigPage: FC = ({ const [resettingPlugin, setResettingPlugin] = useState(false); const [pluginResetError, setPluginResetError] = useState(null); - - const pluginMetaOnCallApiUrl = jsonData?.onCallApiUrl; - const processEnvOnCallApiUrl = process.env.ONCALL_API_URL; // don't destructure this, will break how webpack supplies this - const onCallApiUrl = pluginMetaOnCallApiUrl || processEnvOnCallApiUrl; const licenseType = pluginIsConnected?.license || FALLBACK_LICENSE; + const onCallApiUrl = getOnCallApiUrl(meta); const resetQueryParams = useCallback(() => removePluginConfiguredQueryParams(pluginIsEnabled), [pluginIsEnabled]); @@ -110,12 +108,12 @@ const PluginConfigPage: FC = ({ * Supplying the env var basically allows to skip the configuration form * (check webpack.config.js to see how this is set) */ - if (!pluginMetaOnCallApiUrl && processEnvOnCallApiUrl) { + if (!hasPluginBeenConfigured(meta) && onCallApiUrl) { /** * onCallApiUrl is not yet saved in the grafana plugin settings, 
but has been supplied as an env var * lets auto-trigger a self-hosted plugin install w/ the onCallApiUrl passed in as an env var */ - const errorMsg = await PluginState.selfHostedInstallPlugin(processEnvOnCallApiUrl, true); + const errorMsg = await PluginState.selfHostedInstallPlugin(onCallApiUrl, true); if (errorMsg) { setPluginConnectionCheckError(errorMsg); setCheckingIfPluginIsConnected(false); @@ -146,7 +144,7 @@ const PluginConfigPage: FC = ({ if (!pluginConfiguredRedirect) { configurePluginAndUpdatePluginStatus(); } - }, [pluginMetaOnCallApiUrl, processEnvOnCallApiUrl, onCallApiUrl, pluginConfiguredRedirect]); + }, [onCallApiUrl, pluginConfiguredRedirect]); const resetMessages = useCallback(() => { setPluginResetError(null); @@ -210,9 +208,7 @@ const PluginConfigPage: FC = ({ ); } else if (!pluginIsConnected) { - content = ( - - ); + content = ; } else { // plugin is fully connected and synced const pluginLink = ( diff --git a/grafana-plugin/src/pages/integration/Integration.tsx b/grafana-plugin/src/pages/integration/Integration.tsx index 249ea223..3a220645 100644 --- a/grafana-plugin/src/pages/integration/Integration.tsx +++ b/grafana-plugin/src/pages/integration/Integration.tsx @@ -420,7 +420,7 @@ class Integration extends React.Component { Autoresolve: - {IntegrationHelper.truncateLine(templates['resolve_condition_template'] || 'disabled')} + {IntegrationHelper.truncateLine(templates?.['resolve_condition_template'] || 'disabled')} @@ -1131,7 +1131,7 @@ const IntegrationHeader: React.FC = ({ {alertReceiveChannel.maintenance_till && ( = ({ return (
{alertReceiveChannel.is_available_for_integration_heartbeat && heartbeat?.last_heartbeat_time_verbal && ( ({ }, })); +const onCallApiUrl = 'http://oncall-dev-engine:8080'; + const isUserActionAllowed = isUserActionAllowedOriginal as jest.Mock>; const generatePluginData = ( @@ -32,7 +34,6 @@ describe('rootBaseStore', () => { }); test("onCallApiUrl is not set in the plugin's meta jsonData", async () => { - // mocks/setup const rootBaseStore = new RootBaseStore(); // test @@ -43,9 +44,7 @@ describe('rootBaseStore', () => { }); test('when there is an issue checking the plugin connection, the error is properly handled', async () => { - // mocks/setup const errorMsg = 'ohhh noooo error'; - const onCallApiUrl = 'http://asdfasdf.com'; const rootBaseStore = new RootBaseStore(); PluginState.updatePluginStatus = jest.fn().mockResolvedValueOnce(errorMsg); @@ -61,8 +60,6 @@ describe('rootBaseStore', () => { }); test('currently undergoing maintenance', async () => { - // mocks/setup - const onCallApiUrl = 'http://asdfasdf.com'; const rootBaseStore = new RootBaseStore(); const maintenanceMessage = 'mncvnmvcmnvkjdjkd'; @@ -82,8 +79,6 @@ describe('rootBaseStore', () => { }); test('anonymous user', async () => { - // mocks/setup - const onCallApiUrl = 'http://asdfasdf.com'; const rootBaseStore = new RootBaseStore(); PluginState.updatePluginStatus = jest.fn().mockResolvedValueOnce({ @@ -108,8 +103,6 @@ describe('rootBaseStore', () => { }); test('the plugin is not installed, and allow_signup is false', async () => { - // mocks/setup - const onCallApiUrl = 'http://asdfasdf.com'; const rootBaseStore = new RootBaseStore(); PluginState.updatePluginStatus = jest.fn().mockResolvedValueOnce({ @@ -137,8 +130,6 @@ describe('rootBaseStore', () => { }); test('plugin is not installed, user is not an Admin', async () => { - // mocks/setup - const onCallApiUrl = 'http://asdfasdf.com'; const rootBaseStore = new RootBaseStore(); contextSrv.user.orgRole = OrgRole.Viewer; @@ -174,8 +165,6 @@ 
describe('rootBaseStore', () => { { is_installed: false, token_ok: true }, { is_installed: true, token_ok: false }, ])('signup is allowed, user is an admin, plugin installation is triggered', async (scenario) => { - // mocks/setup - const onCallApiUrl = 'http://asdfasdf.com'; const rootBaseStore = new RootBaseStore(); const mockedLoadCurrentUser = jest.fn(); @@ -219,8 +208,6 @@ describe('rootBaseStore', () => { expected_result: false, }, ])('signup is allowed, licensedAccessControlEnabled, various roles and permissions', async (scenario) => { - // mocks/setup - const onCallApiUrl = 'http://asdfasdf.com'; const rootBaseStore = new RootBaseStore(); const mockedLoadCurrentUser = jest.fn(); @@ -261,8 +248,6 @@ describe('rootBaseStore', () => { }); test('plugin is not installed, signup is allowed, the user is an admin, and plugin installation throws an error', async () => { - // mocks/setup - const onCallApiUrl = 'http://asdfasdf.com'; const rootBaseStore = new RootBaseStore(); const installPluginError = new Error('asdasdfasdfasf'); const humanReadableErrorMsg = 'asdfasldkfjaksdjflk'; @@ -304,8 +289,6 @@ describe('rootBaseStore', () => { }); test('when the plugin is installed, a data sync is triggered', async () => { - // mocks/setup - const onCallApiUrl = 'http://asdfasdf.com'; const rootBaseStore = new RootBaseStore(); const mockedLoadCurrentUser = jest.fn(); @@ -333,8 +316,6 @@ describe('rootBaseStore', () => { }); test('when the plugin is installed, and the data sync returns an error, it is properly handled', async () => { - // mocks/setup - const onCallApiUrl = 'http://asdfasdf.com'; const rootBaseStore = new RootBaseStore(); const mockedLoadCurrentUser = jest.fn(); const updatePluginStatusError = 'asdasdfasdfasf'; diff --git a/grafana-plugin/src/utils/consts.ts b/grafana-plugin/src/utils/consts.ts index 34c73908..37e421f3 100644 --- a/grafana-plugin/src/utils/consts.ts +++ b/grafana-plugin/src/utils/consts.ts @@ -1,3 +1,5 @@ +import { OnCallAppPluginMeta } from 
'types'; + import plugin from '../../package.json'; // eslint-disable-line // Navbar @@ -30,6 +32,13 @@ export const ONCALL_PROD = 'https://oncall-prod-us-central-0.grafana.net/oncall' export const ONCALL_OPS = 'https://oncall-ops-us-east-0.grafana.net/oncall'; export const ONCALL_DEV = 'https://oncall-dev-us-central-0.grafana.net/oncall'; +// Single source of truth on the frontend for OnCall API URL +export const getOnCallApiUrl = (meta?: OnCallAppPluginMeta) => + meta?.jsonData?.onCallApiUrl || process.env.ONCALL_API_URL; + +// If the plugin has never been configured, onCallApiUrl will be undefined in the plugin's jsonData +export const hasPluginBeenConfigured = (meta?: OnCallAppPluginMeta) => Boolean(meta?.jsonData?.onCallApiUrl); + // Faro export const FARO_ENDPOINT_DEV = 'https://faro-collector-prod-us-central-0.grafana.net/collect/fb03e474a96cf867f4a34590c002984c'; From 2b62da77b7afb146fa629905f7e61c9b6d261d0e Mon Sep 17 00:00:00 2001 From: Yulya Artyukhina Date: Fri, 15 Dec 2023 10:33:01 +0100 Subject: [PATCH 02/16] Check if escalation was skipped in Slack before trying to notify user (#3562) # What this PR does Updates check if escalation was skipped in Slack before trying to notify user by Slack. 
## Which issue(s) this PR fixes ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- CHANGELOG.md | 4 ++ engine/apps/alerts/models/alert_group.py | 1 + engine/apps/alerts/tasks/notify_user.py | 18 ++++-- engine/apps/alerts/tests/test_notify_user.py | 62 ++++++++++++++++++++ 4 files changed, 81 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a62c032..da53395f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Support e2e tests in Tilt and Makefile ([#3516](https://github.com/grafana/oncall/pull/3516)) +### Fixed + +- Check reason to skip notification in Slack to avoid task perform_notification retries @Ferril ([#3562](https://github.com/grafana/oncall/pull/3562)) + ## v1.3.80 (2023-12-14) ### Added diff --git a/engine/apps/alerts/models/alert_group.py b/engine/apps/alerts/models/alert_group.py index 54e86764..f13c6c45 100644 --- a/engine/apps/alerts/models/alert_group.py +++ b/engine/apps/alerts/models/alert_group.py @@ -489,6 +489,7 @@ class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models. AlertGroup.ACCOUNT_INACTIVE, AlertGroup.RATE_LIMITED, AlertGroup.CHANNEL_NOT_SPECIFIED, + AlertGroup.RESTRICTED_ACTION, ) def is_alert_a_resolve_signal(self, alert): diff --git a/engine/apps/alerts/tasks/notify_user.py b/engine/apps/alerts/tasks/notify_user.py index b3165558..4a125b64 100644 --- a/engine/apps/alerts/tasks/notify_user.py +++ b/engine/apps/alerts/tasks/notify_user.py @@ -287,19 +287,29 @@ def perform_notification(log_record_pk): # Code below is not consistent. # We check various slack reasons to skip escalation in this task, in send_slack_notification, # before and after posting of slack message. 
- if alert_group.reason_to_skip_escalation == alert_group.RATE_LIMITED: + if alert_group.skip_escalation_in_slack: + notification_error_code = UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK + if alert_group.reason_to_skip_escalation == alert_group.RATE_LIMITED: + notification_error_code = UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_RATELIMIT + elif alert_group.reason_to_skip_escalation == alert_group.CHANNEL_ARCHIVED: + notification_error_code = ( + UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_CHANNEL_IS_ARCHIVED + ) + elif alert_group.reason_to_skip_escalation == alert_group.ACCOUNT_INACTIVE: + notification_error_code = UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_TOKEN_ERROR task_logger.debug( - f"send_slack_notification for alert_group {alert_group.pk} failed because of slack ratelimit." + f"send_slack_notification for alert_group {alert_group.pk} failed because escalation in slack is " + f"skipped, reason: '{alert_group.get_reason_to_skip_escalation_display()}'" ) UserNotificationPolicyLogRecord( author=user, type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, notification_policy=notification_policy, - reason="Slack ratelimit", + reason=f"Skipped escalation in Slack, reason: '{alert_group.get_reason_to_skip_escalation_display()}'", alert_group=alert_group, notification_step=notification_policy.step, notification_channel=notification_channel, - notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_RATELIMIT, + notification_error_code=notification_error_code, ).save() return diff --git a/engine/apps/alerts/tests/test_notify_user.py b/engine/apps/alerts/tests/test_notify_user.py index e6cffe1c..98d8bc2b 100644 --- a/engine/apps/alerts/tests/test_notify_user.py +++ b/engine/apps/alerts/tests/test_notify_user.py @@ -2,10 +2,12 @@ from unittest.mock import patch import pytest +from apps.alerts.models import AlertGroup from apps.alerts.tasks.notify_user import 
notify_user_task, perform_notification from apps.api.permissions import LegacyAccessControlRole from apps.base.models.user_notification_policy import UserNotificationPolicy from apps.base.models.user_notification_policy_log_record import UserNotificationPolicyLogRecord +from apps.slack.models import SlackMessage NOTIFICATION_UNAUTHORIZED_MSG = "notification is not allowed for user" @@ -178,3 +180,63 @@ def test_notify_user_error_if_viewer( assert error_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED assert error_log_record.reason == NOTIFICATION_UNAUTHORIZED_MSG assert error_log_record.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_FORBIDDEN + + +@pytest.mark.django_db +@pytest.mark.parametrize( + "reason_to_skip_escalation,error_code", + [ + (AlertGroup.RATE_LIMITED, UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_RATELIMIT), + (AlertGroup.CHANNEL_ARCHIVED, UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_CHANNEL_IS_ARCHIVED), + (AlertGroup.ACCOUNT_INACTIVE, UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_TOKEN_ERROR), + (AlertGroup.RESTRICTED_ACTION, UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK), + (AlertGroup.NO_REASON, None), + ], +) +def test_perform_notification_reason_to_skip_escalation_in_slack( + reason_to_skip_escalation, + error_code, + make_organization, + make_slack_team_identity, + make_user, + make_user_notification_policy, + make_alert_receive_channel, + make_alert_group, + make_user_notification_policy_log_record, + make_slack_message, +): + organization = make_organization() + slack_team_identity = make_slack_team_identity() + organization.slack_team_identity = slack_team_identity + organization.save() + user = make_user(organization=organization) + user_notification_policy = make_user_notification_policy( + user=user, + step=UserNotificationPolicy.Step.NOTIFY, + notify_by=UserNotificationPolicy.NotificationChannel.SLACK, + ) + 
alert_receive_channel = make_alert_receive_channel(organization=organization) + alert_group = make_alert_group(alert_receive_channel=alert_receive_channel) + alert_group.reason_to_skip_escalation = reason_to_skip_escalation + alert_group.save() + log_record = make_user_notification_policy_log_record( + author=user, + alert_group=alert_group, + notification_policy=user_notification_policy, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED, + ) + if not error_code: + make_slack_message(alert_group=alert_group, channel_id="test_channel_id", slack_id="test_slack_id") + with patch.object(SlackMessage, "send_slack_notification") as mocked_send_slack_notification: + perform_notification(log_record.pk) + last_log_record = UserNotificationPolicyLogRecord.objects.last() + + if error_code: + log_reason = f"Skipped escalation in Slack, reason: '{alert_group.get_reason_to_skip_escalation_display()}'" + mocked_send_slack_notification.assert_not_called() + assert last_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED + assert last_log_record.reason == log_reason + assert last_log_record.notification_error_code == error_code + else: + mocked_send_slack_notification.assert_called() + assert last_log_record.type != UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED From 3c26f8e483c601b326fa8f42349d5d88a3ef2cf5 Mon Sep 17 00:00:00 2001 From: Kleber Rocha Date: Fri, 15 Dec 2023 09:53:36 -0300 Subject: [PATCH 03/16] Fix: the examples in extraVolumeMounts and extraVolumes are swapped (#3565) The examples at extraVolumeMounts and extraVolumes properties are swapped # What this PR does Fixing the properties extraVolumeMounts and extraVolumes in Helm chart ## Which issue(s) this PR fixes ## Checklist - [ ] Unit, integration, and e2e (if applicable) tests updated - [ ] Documentation added (or `pr:no public docs` PR label added if not required) - [ ] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not 
required) Signed-off-by: Kleber Rocha Co-authored-by: Joey Orlando --- helm/oncall/values.yaml | 70 ++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/helm/oncall/values.yaml b/helm/oncall/values.yaml index e5323893..97119654 100644 --- a/helm/oncall/values.yaml +++ b/helm/oncall/values.yaml @@ -82,6 +82,13 @@ engine: # Extra volume mounts for the main app container extraVolumeMounts: [] + # - mountPath: /mnt/postgres-tls + # name: postgres-tls + # - mountPath: /mnt/redis-tls + # name: redis-tls + + # Extra volumes for the pod + extraVolumes: [] # - name: postgres-tls # configMap: # name: my-postgres-tls @@ -91,13 +98,6 @@ engine: # name: my-redis-tls # defaultMode: 0640 - # Extra volumes for the pod - extraVolumes: [] - # - mountPath: /mnt/postgres-tls - # name: postgres-tls - # - mountPath: /mnt/redis-tls - # name: redis-tls - detached_integrations_service: enabled: false type: LoadBalancer @@ -157,6 +157,13 @@ detached_integrations: # Extra volume mounts for the container extraVolumeMounts: [] + # - mountPath: /mnt/postgres-tls + # name: postgres-tls + # - mountPath: /mnt/redis-tls + # name: redis-tls + + # Extra volumes for the pod + extraVolumes: [] # - name: postgres-tls # configMap: # name: my-postgres-tls @@ -166,13 +173,6 @@ detached_integrations: # name: my-redis-tls # defaultMode: 0640 - # Extra volumes for the pod - extraVolumes: [] - # - mountPath: /mnt/postgres-tls - # name: postgres-tls - # - mountPath: /mnt/redis-tls - # name: redis-tls - # Celery workers pods configuration celery: replicaCount: 1 @@ -235,6 +235,13 @@ celery: # Extra volume mounts for the main container extraVolumeMounts: [] + # - mountPath: /mnt/postgres-tls + # name: postgres-tls + # - mountPath: /mnt/redis-tls + # name: redis-tls + + # Extra volumes for the pod + extraVolumes: [] # - name: postgres-tls # configMap: # name: my-postgres-tls @@ -244,13 +251,6 @@ celery: # name: my-redis-tls # defaultMode: 0640 - # Extra volumes 
for the pod - extraVolumes: [] - # - mountPath: /mnt/postgres-tls - # name: postgres-tls - # - mountPath: /mnt/redis-tls - # name: redis-tls - # Telegram polling pod configuration telegramPolling: enabled: false @@ -268,6 +268,13 @@ telegramPolling: # Extra volume mounts for the main container extraVolumeMounts: [] + # - mountPath: /mnt/postgres-tls + # name: postgres-tls + # - mountPath: /mnt/redis-tls + # name: redis-tls + + # Extra volumes for the pod + extraVolumes: [] # - name: postgres-tls # configMap: # name: my-postgres-tls @@ -277,13 +284,6 @@ telegramPolling: # name: my-redis-tls # defaultMode: 0640 - # Extra volumes for the pod - extraVolumes: [] - # - mountPath: /mnt/postgres-tls - # name: postgres-tls - # - mountPath: /mnt/redis-tls - # name: redis-tls - oncall: # this is intended to be used for local development. In short, it will mount the ./engine dir into # any backend related containers, to allow hot-reloading + also run the containers with slightly modified @@ -420,6 +420,13 @@ migrate: # Extra volume mounts for the main container extraVolumeMounts: [] + # - mountPath: /mnt/postgres-tls + # name: postgres-tls + # - mountPath: /mnt/redis-tls + # name: redis-tls + + # Extra volumes for the pod + extraVolumes: [] # - name: postgres-tls # configMap: # name: my-postgres-tls @@ -429,13 +436,6 @@ migrate: # name: my-redis-tls # defaultMode: 0640 - # Extra volumes for the pod - extraVolumes: [] - # - mountPath: /mnt/postgres-tls - # name: postgres-tls - # - mountPath: /mnt/redis-tls - # name: redis-tls - # Sets environment variables with name capitalized and prefixed with UWSGI_, # and dashes are substituted with underscores. 
# see more: https://uwsgi-docs.readthedocs.io/en/latest/Configuration.html#environment-variables From e7f3eff72cf54d073479148619b5a45788f14f97 Mon Sep 17 00:00:00 2001 From: Michael Derynck Date: Fri, 15 Dec 2023 09:50:01 -0700 Subject: [PATCH 04/16] Limit how long acknowledge reminders can run for (#3571) # What this PR does Stops rescheduling of `acknowledge_reminder_task` after 2 weeks. Assumption being if it has been sitting for that long in acknowledged state it is likely to not need more reminders that it is still acknowledged. Notifications for thread were probably muted a long time ago. ## Which issue(s) this PR fixes ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- CHANGELOG.md | 4 ++++ .../apps/alerts/tasks/acknowledge_reminder.py | 7 ++++++ .../alerts/tests/test_acknowledge_reminder.py | 24 +++++++++++++++++++ engine/settings/base.py | 2 ++ 4 files changed, 37 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index da53395f..d87d0315 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Support e2e tests in Tilt and Makefile ([#3516](https://github.com/grafana/oncall/pull/3516)) +### Changed + +- Limit acknowledge reminders to stop repeating after 2 weeks @mderynck ([#3571](https://github.com/grafana/oncall/pull/3571)) + ### Fixed - Check reason to skip notification in Slack to avoid task perform_notification retries @Ferril ([#3562](https://github.com/grafana/oncall/pull/3562)) diff --git a/engine/apps/alerts/tasks/acknowledge_reminder.py b/engine/apps/alerts/tasks/acknowledge_reminder.py index dd9848ac..be6efb05 100644 --- a/engine/apps/alerts/tasks/acknowledge_reminder.py +++ b/engine/apps/alerts/tasks/acknowledge_reminder.py @@ -1,7 +1,9 @@ +from datetime import
timedelta from functools import partial from django.conf import settings from django.db import transaction +from django.utils import timezone from common.custom_celery_tasks import shared_dedicated_queue_retry_task @@ -61,6 +63,11 @@ def acknowledge_reminder_task(alert_group_pk: int, unacknowledge_process_id: str (alert_group.pk, unacknowledge_process_id), countdown=unacknowledge_timeout ) else: + if alert_group.started_at < timezone.now() - timedelta(days=settings.ACKNOWLEDGE_REMINDER_TASK_EXPIRY_DAYS): + task_logger.info( + f"alert group {alert_group_pk} not renewing acknowledgement reminder, started_at is too old. {log_info}" + ) + return acknowledge_reminder_task.apply_async( (alert_group.pk, unacknowledge_process_id), countdown=acknowledge_reminder_timeout ) diff --git a/engine/apps/alerts/tests/test_acknowledge_reminder.py b/engine/apps/alerts/tests/test_acknowledge_reminder.py index f66fe9af..759c8c1d 100644 --- a/engine/apps/alerts/tests/test_acknowledge_reminder.py +++ b/engine/apps/alerts/tests/test_acknowledge_reminder.py @@ -1,3 +1,4 @@ +from datetime import timedelta from unittest.mock import patch import pytest @@ -339,3 +340,26 @@ def test_unacknowledge_timeout_task_skip_deleted_org( mock_acknowledge_reminder_task.assert_not_called() assert not alert_group.log_records.exists() + + +@patch.object(acknowledge_reminder_task, "apply_async") +@patch.object(unacknowledge_timeout_task, "apply_async") +@pytest.mark.django_db +def test_ack_reminder_cancel_too_old( + mock_acknowledge_reminder_task, + mock_unacknowledge_timeout_task, + ack_reminder_test_setup, + settings, +): + organization, alert_group, user = ack_reminder_test_setup( + unacknowledge_timeout=Organization.UNACKNOWLEDGE_TIMEOUT_NEVER + ) + alert_group.started_at = timezone.now() - timedelta(days=settings.ACKNOWLEDGE_REMINDER_TASK_EXPIRY_DAYS + 1) + alert_group.save() + + acknowledge_reminder_task(alert_group.pk, TASK_ID) + + mock_unacknowledge_timeout_task.assert_not_called() + 
mock_acknowledge_reminder_task.assert_not_called() + + assert not alert_group.log_records.exists() diff --git a/engine/settings/base.py b/engine/settings/base.py index b8e23aa6..bc8935ac 100644 --- a/engine/settings/base.py +++ b/engine/settings/base.py @@ -848,3 +848,5 @@ ZVONOK_POSTBACK_USER_CHOICE = os.getenv("ZVONOK_POSTBACK_USER_CHOICE", None) ZVONOK_POSTBACK_USER_CHOICE_ACK = os.getenv("ZVONOK_POSTBACK_USER_CHOICE_ACK", None) DETACHED_INTEGRATIONS_SERVER = getenv_boolean("DETACHED_INTEGRATIONS_SERVER", default=False) + +ACKNOWLEDGE_REMINDER_TASK_EXPIRY_DAYS = os.environ.get("ACKNOWLEDGE_REMINDER_TASK_EXPIRY_DAYS", default=14) From 8ade7d65e8450a5c7997b1029862565383687aef Mon Sep 17 00:00:00 2001 From: Yulya Artyukhina Date: Fri, 15 Dec 2023 18:15:50 +0100 Subject: [PATCH 05/16] Fix alert group columns validation (#3577) # What this PR does Fix alert group columns validation: - validate column ids by each type separately ## Which issue(s) this PR fixes validation check from this issue - https://github.com/grafana/oncall-private/issues/2378 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- CHANGELOG.md | 1 + .../serializers/alert_group_table_settings.py | 15 +++++++---- .../tests/test_alert_group_table_settings.py | 26 +++++++++++++++++-- 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d87d0315..b0b487ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - Check reason to skip notification in Slack to avoid task perform_notification retries @Ferril ([#3562](https://github.com/grafana/oncall/pull/3562)) +- Fix alert group table columns validation @Ferril ([#3577](https://github.com/grafana/oncall/pull/3577)) ## v1.3.80 
(2023-12-14) diff --git a/engine/apps/api/serializers/alert_group_table_settings.py b/engine/apps/api/serializers/alert_group_table_settings.py index 24dccf1a..0b24c791 100644 --- a/engine/apps/api/serializers/alert_group_table_settings.py +++ b/engine/apps/api/serializers/alert_group_table_settings.py @@ -34,14 +34,19 @@ class AlertGroupTableColumnsOrganizationSerializer(serializers.Serializer): """ Validate that at least one column is selected as visible and that all default columns are in the list. """ - columns = data["visible"] + data["hidden"] - request_columns_ids = [column["id"] for column in columns] + request_columns_by_type = {} + for column in data["visible"] + data["hidden"]: + request_columns_by_type.setdefault(column["type"], []).append(column["id"]) if len(data["visible"]) == 0: raise ValidationError("At least one column should be selected as visible") - elif not set(request_columns_ids) >= set(AlertGroupTableDefaultColumnChoices.values): + elif not ( + set(request_columns_by_type[AlertGroupTableColumnTypeChoices.DEFAULT]) + == set(AlertGroupTableDefaultColumnChoices.values) + ): raise ValidationError("Default column cannot be removed") - elif len(request_columns_ids) > len(set(request_columns_ids)): - raise ValidationError("Duplicate column") + for columns_ids in request_columns_by_type.values(): + if len(columns_ids) > len(set(columns_ids)): + raise ValidationError("Duplicate column") return data diff --git a/engine/apps/api/tests/test_alert_group_table_settings.py b/engine/apps/api/tests/test_alert_group_table_settings.py index 175f3308..edbab9e9 100644 --- a/engine/apps/api/tests/test_alert_group_table_settings.py +++ b/engine/apps/api/tests/test_alert_group_table_settings.py @@ -5,7 +5,11 @@ from rest_framework.test import APIClient from apps.api.alert_group_table_columns import alert_group_table_user_settings from apps.api.permissions import LegacyAccessControlRole -from apps.user_management.constants import AlertGroupTableColumnTypeChoices, 
default_columns +from apps.user_management.constants import ( + AlertGroupTableColumnTypeChoices, + AlertGroupTableDefaultColumnChoices, + default_columns, +) DEFAULT_COLUMNS = default_columns() @@ -41,6 +45,18 @@ def test_get_columns( columns_settings({"name": "Test", "id": "test", "type": AlertGroupTableColumnTypeChoices.LABEL.value}), status.HTTP_200_OK, ), + # add label column with the same id as default + ( + columns_settings(), + columns_settings({"name": "Status", "id": "status", "type": AlertGroupTableColumnTypeChoices.LABEL.value}), + status.HTTP_200_OK, + ), + # add unexisting default column + ( + columns_settings(), + columns_settings({"name": "Hello", "id": "hello", "type": AlertGroupTableColumnTypeChoices.DEFAULT.value}), + status.HTTP_400_BAD_REQUEST, + ), # remove column ( columns_settings({"name": "Test", "id": "test", "type": AlertGroupTableColumnTypeChoices.LABEL.value}), @@ -60,7 +76,13 @@ def test_get_columns( # duplicate id ( columns_settings(), - columns_settings({"name": "Test", "id": 1, "type": AlertGroupTableColumnTypeChoices.DEFAULT.value}), + columns_settings( + { + "name": "Test", + "id": AlertGroupTableDefaultColumnChoices.STATUS.value, + "type": AlertGroupTableColumnTypeChoices.DEFAULT.value, + } + ), status.HTTP_400_BAD_REQUEST, ), # remove default column From 36227418ed3e1d6d476bdd8a2962870f5c8e76f9 Mon Sep 17 00:00:00 2001 From: Yulya Artyukhina Date: Mon, 18 Dec 2023 13:28:55 +0100 Subject: [PATCH 06/16] Speed up escalation auditor (#3578) # What this PR does Speed up escalation auditor - use raw escalation snapshot instead of serialized one ## Which issue(s) this PR fixes ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- .../escalation_snapshot_mixin.py | 20 +++++++++++ .../snapshot_classes/escalation_snapshot.py | 14 -------- 
.../alerts/tasks/check_escalation_finished.py | 33 ++++++++++--------- .../test_check_escalation_finished_task.py | 24 ++++++++------ .../alerts/tests/test_escalation_snapshot.py | 4 ++- 5 files changed, 54 insertions(+), 41 deletions(-) diff --git a/engine/apps/alerts/escalation_snapshot/escalation_snapshot_mixin.py b/engine/apps/alerts/escalation_snapshot/escalation_snapshot_mixin.py index 0c06054c..8483ef55 100644 --- a/engine/apps/alerts/escalation_snapshot/escalation_snapshot_mixin.py +++ b/engine/apps/alerts/escalation_snapshot/escalation_snapshot_mixin.py @@ -5,6 +5,7 @@ import typing import pytz from celery import uuid as celery_uuid from dateutil.parser import parse +from django.utils import timezone from django.utils.functional import cached_property from rest_framework.exceptions import ValidationError @@ -212,6 +213,12 @@ class EscalationSnapshotMixin: return False return self.raw_escalation_snapshot.get("pause_escalation", False) + @property + def last_active_escalation_policy_order(self) -> typing.Optional[int]: + if not self.raw_escalation_snapshot: + return None + return self.raw_escalation_snapshot.get("last_active_escalation_policy_order") + @property def next_step_eta(self) -> typing.Optional[datetime.datetime]: """ @@ -223,6 +230,19 @@ class EscalationSnapshotMixin: raw_next_step_eta = self.raw_escalation_snapshot.get("next_step_eta") return None if not raw_next_step_eta else parse(raw_next_step_eta).replace(tzinfo=pytz.UTC) + def next_step_eta_is_valid(self) -> typing.Optional[bool]: + """ + `next_step_eta` should never be less than the current time (with a 5 minute buffer provided) + as this field should be updated as the escalation policy is executed over time. 
If it is, this means that + an escalation policy step has been missed, or is substantially delayed + + if `next_step_eta` is `None` then `None` is returned, otherwise a boolean is returned + representing the result of the time comparison + """ + if self.next_step_eta is None: + return None + return self.next_step_eta > (timezone.now() - datetime.timedelta(minutes=5)) + def update_next_step_eta(self, increase_by_timedelta: datetime.timedelta) -> typing.Optional[dict]: """ update next_step_eta field directly to avoid serialization overhead diff --git a/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_snapshot.py b/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_snapshot.py index 71bb3854..e6269a00 100644 --- a/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_snapshot.py +++ b/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_snapshot.py @@ -3,7 +3,6 @@ import logging import typing from celery.utils.log import get_task_logger -from django.utils import timezone from apps.alerts.escalation_snapshot.serializers import EscalationSnapshotSerializer from apps.alerts.models.alert_group_log_record import AlertGroupLogRecord @@ -90,19 +89,6 @@ class EscalationSnapshot: return [] return self.escalation_policies_snapshots[: self.last_active_escalation_policy_order + 1] - def next_step_eta_is_valid(self) -> typing.Optional[bool]: - """ - `next_step_eta` should never be less than the current time (with a 5 minute buffer provided) - as this field should be updated as the escalation policy is executed over time.
If it is, this means that - an escalation policy step has been missed, or is substantially delayed - - if `next_step_eta` is `None` then `None` is returned, otherwise a boolean is returned - representing the result of the time comparision - """ - if self.next_step_eta is None: - return None - return self.next_step_eta > (timezone.now() - datetime.timedelta(minutes=5)) - def save_to_alert_group(self) -> None: self.alert_group.raw_escalation_snapshot = self.convert_to_dict() self.alert_group.save(update_fields=["raw_escalation_snapshot"]) diff --git a/engine/apps/alerts/tasks/check_escalation_finished.py b/engine/apps/alerts/tasks/check_escalation_finished.py index 211f8a57..88d1ae4f 100644 --- a/engine/apps/alerts/tasks/check_escalation_finished.py +++ b/engine/apps/alerts/tasks/check_escalation_finished.py @@ -29,26 +29,26 @@ def send_alert_group_escalation_auditor_task_heartbeat() -> None: def audit_alert_group_escalation(alert_group: "AlertGroup") -> None: - escalation_snapshot = alert_group.escalation_snapshot + raw_escalation_snapshot: dict = alert_group.raw_escalation_snapshot alert_group_id = alert_group.id base_msg = f"Alert group {alert_group_id}" - if not alert_group.escalation_chain_exists: + if not raw_escalation_snapshot: + msg = f"{base_msg} does not have an escalation snapshot associated with it, this should never occur" + + task_logger.warning(msg) + raise AlertGroupEscalationPolicyExecutionAuditException(msg) + + if not raw_escalation_snapshot.get("escalation_chain_snapshot"): task_logger.info( f"{base_msg} does not have an escalation chain associated with it, and therefore it is expected " "that it will not have an escalation snapshot, skipping further validation" ) return - if not escalation_snapshot: - msg = f"{base_msg} does not have an escalation snapshot associated with it, this should never occur" - - task_logger.warning(msg) - raise AlertGroupEscalationPolicyExecutionAuditException(msg) - task_logger.info(f"{base_msg} has an escalation 
snapshot associated with it, auditing if it executed properly") - escalation_policies_snapshots = escalation_snapshot.escalation_policies_snapshots + escalation_policies_snapshots = raw_escalation_snapshot.get("escalation_policies_snapshots") if not escalation_policies_snapshots: task_logger.info( @@ -59,18 +59,19 @@ def audit_alert_group_escalation(alert_group: "AlertGroup") -> None: f"{base_msg}'s escalation snapshot has a populated escalation_policies_snapshots, continuing validation" ) - if escalation_snapshot.next_step_eta_is_valid() is False: - msg = ( - f"{base_msg}'s escalation snapshot does not have a valid next_step_eta: {escalation_snapshot.next_step_eta}" - ) + if alert_group.next_step_eta_is_valid() is False: + msg = f"{base_msg}'s escalation snapshot does not have a valid next_step_eta: {alert_group.next_step_eta}" task_logger.warning(msg) raise AlertGroupEscalationPolicyExecutionAuditException(msg) - task_logger.info(f"{base_msg}'s escalation snapshot has a valid next_step_eta: {escalation_snapshot.next_step_eta}") + task_logger.info(f"{base_msg}'s escalation snapshot has a valid next_step_eta: {alert_group.next_step_eta}") - executed_escalation_policy_snapshots = escalation_snapshot.executed_escalation_policy_snapshots - num_of_executed_escalation_policy_snapshots = len(executed_escalation_policy_snapshots) + num_of_executed_escalation_policy_snapshots = ( + alert_group.last_active_escalation_policy_order + 1 + if alert_group.last_active_escalation_policy_order is not None + else 0 + ) if num_of_executed_escalation_policy_snapshots == 0: task_logger.info( diff --git a/engine/apps/alerts/tests/test_check_escalation_finished_task.py b/engine/apps/alerts/tests/test_check_escalation_finished_task.py index 389e0fd9..d635db10 100644 --- a/engine/apps/alerts/tests/test_check_escalation_finished_task.py +++ b/engine/apps/alerts/tests/test_check_escalation_finished_task.py @@ -85,7 +85,7 @@ def 
test_send_alert_group_escalation_auditor_task_heartbeat_raises_an_exception_ @pytest.mark.django_db -def test_audit_alert_group_escalation_skips_validation_if_the_alert_group_does_not_have_an_escalation_chain( +def test_audit_alert_group_escalation_skips_validation_if_the_alert_group_does_not_have_an_escalation_chain_snapshot( make_organization_and_user, make_alert_receive_channel, make_alert_group, @@ -94,10 +94,10 @@ def test_audit_alert_group_escalation_skips_validation_if_the_alert_group_does_n alert_receive_channel = make_alert_receive_channel(organization) alert_group = make_alert_group(alert_receive_channel) - alert_group.escalation_snapshot = None + alert_group.raw_escalation_snapshot = {"escalation_chain_snapshot": None} alert_group.save() - assert alert_group.escalation_chain_exists is False + assert alert_group.raw_escalation_snapshot["escalation_chain_snapshot"] is None try: audit_alert_group_escalation(alert_group) @@ -110,7 +110,8 @@ def test_audit_alert_group_escalation_raises_exception_if_the_alert_group_does_n escalation_snapshot_test_setup, ): alert_group, _, _, _ = escalation_snapshot_test_setup - alert_group.escalation_snapshot = None + alert_group.raw_escalation_snapshot = None + alert_group.save() with pytest.raises(AlertGroupEscalationPolicyExecutionAuditException): audit_alert_group_escalation(alert_group) @@ -123,13 +124,16 @@ def test_audit_alert_group_escalation_skips_further_validation_if_the_escalation alert_group, _, _, _ = escalation_snapshot_test_setup alert_group.escalation_snapshot.escalation_policies_snapshots = [] + alert_group.raw_escalation_snapshot = {"escalation_policies_snapshots": []} + alert_group.save() audit_alert_group_escalation(alert_group) - alert_group.escalation_snapshot.escalation_policies_snapshots = None + alert_group.raw_escalation_snapshot["escalation_policies_snapshots"] = None + alert_group.save() audit_alert_group_escalation(alert_group) 
-@patch("apps.alerts.escalation_snapshot.snapshot_classes.escalation_snapshot.EscalationSnapshot.next_step_eta_is_valid") +@patch("apps.alerts.escalation_snapshot.escalation_snapshot_mixin.EscalationSnapshotMixin.next_step_eta_is_valid") @pytest.mark.django_db @pytest.mark.parametrize( "next_step_eta_is_valid_return_value,raises_exception", @@ -158,18 +162,18 @@ def test_audit_alert_group_escalation_next_step_eta_validation( @patch( - "apps.alerts.escalation_snapshot.snapshot_classes.escalation_snapshot.EscalationSnapshot.executed_escalation_policy_snapshots", + "apps.alerts.escalation_snapshot.escalation_snapshot_mixin.EscalationSnapshotMixin.last_active_escalation_policy_order", new_callable=PropertyMock, ) @pytest.mark.django_db def test_audit_alert_group_escalation_no_executed_escalation_policy_snapshots( - mock_executed_escalation_policy_snapshots, escalation_snapshot_test_setup + mock_last_active_escalation_policy_order, escalation_snapshot_test_setup ): alert_group, _, _, _ = escalation_snapshot_test_setup - mock_executed_escalation_policy_snapshots.return_value = [] + mock_last_active_escalation_policy_order.return_value = None audit_alert_group_escalation(alert_group) - mock_executed_escalation_policy_snapshots.assert_called_once_with() + mock_last_active_escalation_policy_order.assert_called_once_with() # # see TODO: comment in engine/apps/alerts/tasks/check_escalation_finished.py diff --git a/engine/apps/alerts/tests/test_escalation_snapshot.py b/engine/apps/alerts/tests/test_escalation_snapshot.py index da0b6304..2bfbf2f8 100644 --- a/engine/apps/alerts/tests/test_escalation_snapshot.py +++ b/engine/apps/alerts/tests/test_escalation_snapshot.py @@ -196,8 +196,10 @@ def test_next_step_eta_is_valid(escalation_snapshot_test_setup, next_step_eta, e escalation_snapshot = alert_group.escalation_snapshot escalation_snapshot.next_step_eta = next_step_eta + escalation_snapshot.save_to_alert_group() + alert_group.refresh_from_db() - assert 
escalation_snapshot.next_step_eta_is_valid() is expected + assert alert_group.next_step_eta_is_valid() is expected @pytest.mark.django_db From f68b9dd004b3262512d2a0f7ded37cb60ad1afdb Mon Sep 17 00:00:00 2001 From: Matias Bordese Date: Mon, 18 Dec 2023 13:13:18 -0300 Subject: [PATCH 07/16] Update auditor to check personal notifications (#3563) Requires https://github.com/grafana/oncall/pull/3557 Related to https://github.com/grafana/oncall-private/issues/2347 --- .../alerts/tasks/check_escalation_finished.py | 32 ++++- engine/apps/alerts/tasks/notify_user.py | 18 ++- .../test_check_escalation_finished_task.py | 125 ++++++++++++++++++ engine/apps/alerts/tests/test_notify_user.py | 45 +++++++ engine/settings/celery_task_routes.py | 1 + 5 files changed, 219 insertions(+), 2 deletions(-) diff --git a/engine/apps/alerts/tasks/check_escalation_finished.py b/engine/apps/alerts/tasks/check_escalation_finished.py index 88d1ae4f..0204aaea 100644 --- a/engine/apps/alerts/tasks/check_escalation_finished.py +++ b/engine/apps/alerts/tasks/check_escalation_finished.py @@ -4,7 +4,7 @@ import typing import requests from celery import shared_task from django.conf import settings -from django.db.models import Avg, F, Max +from django.db.models import Avg, F, Max, Q from django.utils import timezone from apps.alerts.tasks.task_logger import task_logger @@ -82,9 +82,39 @@ def audit_alert_group_escalation(alert_group: "AlertGroup") -> None: f"{base_msg}'s escalation snapshot has {num_of_executed_escalation_policy_snapshots} executed escalation policies" ) + check_personal_notifications_task.apply_async((alert_group_id,)) + task_logger.info(f"{base_msg} passed the audit checks") +@shared_task +def check_personal_notifications_task(alert_group_id) -> None: + # Check personal notifications are completed + # triggered (< 5min ago) == failed + success + from apps.base.models import UserNotificationPolicy, UserNotificationPolicyLogRecord + + triggered = 
UserNotificationPolicyLogRecord.objects.filter( + alert_group_id=alert_group_id, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED, + notification_step=UserNotificationPolicy.Step.NOTIFY, + created_at__lte=timezone.now() - timezone.timedelta(minutes=5), + ).count() + completed = UserNotificationPolicyLogRecord.objects.filter( + Q(type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED) + | Q(type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_SUCCESS), + alert_group_id=alert_group_id, + notification_step=UserNotificationPolicy.Step.NOTIFY, + ).count() + + base_msg = f"Alert group {alert_group_id}" + delta = triggered - completed + if delta > 0: + # TODO: when success notifications are setup for every backend, raise exception here + task_logger.info(f"{base_msg} has ({delta}) uncompleted personal notifications") + else: + task_logger.info(f"{base_msg} personal notifications check passed") + + @shared_task def check_escalation_finished_task() -> None: """ diff --git a/engine/apps/alerts/tasks/notify_user.py b/engine/apps/alerts/tasks/notify_user.py index 4a125b64..7578ba53 100644 --- a/engine/apps/alerts/tasks/notify_user.py +++ b/engine/apps/alerts/tasks/notify_user.py @@ -313,7 +313,7 @@ def perform_notification(log_record_pk): ).save() return - if alert_group.notify_in_slack_enabled is True and not log_record.slack_prevent_posting: + if alert_group.notify_in_slack_enabled is True: # we cannot notify users in Slack if their team does not have Slack integration if alert_group.channel.organization.slack_team_identity is None: task_logger.debug( @@ -332,6 +332,22 @@ def perform_notification(log_record_pk): ).save() return + if log_record.slack_prevent_posting: + task_logger.debug( + f"send_slack_notification for alert_group {alert_group.pk} failed because slack posting is disabled." 
+ ) + UserNotificationPolicyLogRecord( + author=user, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, + notification_policy=notification_policy, + reason="Prevented from posting in Slack", + alert_group=alert_group, + notification_step=notification_policy.step, + notification_channel=notification_channel, + notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_POSTING_TO_SLACK_IS_DISABLED, + ).save() + return + retry_timeout_hours = 1 if alert_group.slack_message: alert_group.slack_message.send_slack_notification(user, alert_group, notification_policy) diff --git a/engine/apps/alerts/tests/test_check_escalation_finished_task.py b/engine/apps/alerts/tests/test_check_escalation_finished_task.py index d635db10..8a8c7b85 100644 --- a/engine/apps/alerts/tests/test_check_escalation_finished_task.py +++ b/engine/apps/alerts/tests/test_check_escalation_finished_task.py @@ -5,12 +5,15 @@ import requests from django.test import override_settings from django.utils import timezone +from apps.alerts.models import EscalationPolicy from apps.alerts.tasks.check_escalation_finished import ( AlertGroupEscalationPolicyExecutionAuditException, audit_alert_group_escalation, check_escalation_finished_task, + check_personal_notifications_task, send_alert_group_escalation_auditor_task_heartbeat, ) +from apps.base.models import UserNotificationPolicy, UserNotificationPolicyLogRecord MOCKED_HEARTBEAT_URL = "https://hello.com/lsdjjkf" @@ -389,3 +392,125 @@ def test_check_escalation_finished_task_calls_audit_alert_group_escalation_for_e mocked_audit_alert_group_escalation.assert_any_call(alert_group3) mocked_send_alert_group_escalation_auditor_task_heartbeat.assert_not_called() + + +@patch("apps.alerts.tasks.check_escalation_finished.send_alert_group_escalation_auditor_task_heartbeat") +@pytest.mark.django_db +def test_check_escalation_finished_task_calls_audit_alert_group_personal_notifications( + 
mocked_send_alert_group_escalation_auditor_task_heartbeat, + make_organization_and_user, + make_user_notification_policy, + make_escalation_chain, + make_escalation_policy, + make_channel_filter, + make_alert_receive_channel, + make_alert_group_that_started_at_specific_date, + make_user_notification_policy_log_record, + caplog, +): + organization, user = make_organization_and_user() + user_notification_policy = make_user_notification_policy( + user=user, + step=UserNotificationPolicy.Step.NOTIFY, + notify_by=UserNotificationPolicy.NotificationChannel.SLACK, + ) + + alert_receive_channel = make_alert_receive_channel(organization) + escalation_chain = make_escalation_chain(organization) + channel_filter = make_channel_filter(alert_receive_channel, escalation_chain=escalation_chain) + notify_to_multiple_users_step = make_escalation_policy( + escalation_chain=channel_filter.escalation_chain, + escalation_policy_step=EscalationPolicy.STEP_NOTIFY_MULTIPLE_USERS, + ) + notify_to_multiple_users_step.notify_to_users_queue.set([user]) + + alert_group1 = make_alert_group_that_started_at_specific_date(alert_receive_channel, channel_filter=channel_filter) + alert_group2 = make_alert_group_that_started_at_specific_date(alert_receive_channel, channel_filter=channel_filter) + alert_group3 = make_alert_group_that_started_at_specific_date(alert_receive_channel, channel_filter=channel_filter) + alert_group4 = make_alert_group_that_started_at_specific_date(alert_receive_channel, channel_filter=channel_filter) + alert_groups = [alert_group1, alert_group2, alert_group3, alert_group4] + for alert_group in alert_groups: + alert_group.raw_escalation_snapshot = alert_group.build_raw_escalation_snapshot() + alert_group.raw_escalation_snapshot["last_active_escalation_policy_order"] = 1 + alert_group.save() + + now = timezone.now() + # alert_group1: wait, notify user, notification successful + make_user_notification_policy_log_record( + author=user, + alert_group=alert_group1, + 
notification_policy=user_notification_policy, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED, + notification_step=UserNotificationPolicy.Step.WAIT, + ) + make_user_notification_policy_log_record( + author=user, + alert_group=alert_group1, + notification_policy=user_notification_policy, + notification_step=UserNotificationPolicy.Step.NOTIFY, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED, + ) + make_user_notification_policy_log_record( + author=user, + alert_group=alert_group1, + notification_policy=user_notification_policy, + notification_step=UserNotificationPolicy.Step.NOTIFY, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_SUCCESS, + ) + # records created > 5 mins ago + alert_group1.personal_log_records.update(created_at=now - timezone.timedelta(minutes=7)) + + # alert_group2: notify user, notification failed + make_user_notification_policy_log_record( + author=user, + alert_group=alert_group2, + notification_policy=user_notification_policy, + notification_step=UserNotificationPolicy.Step.NOTIFY, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED, + ) + make_user_notification_policy_log_record( + author=user, + alert_group=alert_group2, + notification_policy=user_notification_policy, + notification_step=UserNotificationPolicy.Step.NOTIFY, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, + ) + # records created > 5 mins ago + alert_group2.personal_log_records.update(created_at=now - timezone.timedelta(minutes=7)) + + # alert_group3: notify user, missing completion + make_user_notification_policy_log_record( + author=user, + alert_group=alert_group3, + notification_policy=user_notification_policy, + notification_step=UserNotificationPolicy.Step.NOTIFY, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED, + ) + # record created > 5 mins ago + alert_group3.personal_log_records.update(created_at=now - 
timezone.timedelta(minutes=7)) + + # alert_group4: notify user created > 5 mins ago, missing completion + make_user_notification_policy_log_record( + author=user, + created_at=now - timezone.timedelta(minutes=3), + alert_group=alert_group3, + notification_policy=user_notification_policy, + notification_step=UserNotificationPolicy.Step.NOTIFY, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED, + ) + # record created < 5 mins ago + alert_group4.personal_log_records.update(created_at=now - timezone.timedelta(minutes=2)) + + # trigger task + with patch("apps.alerts.tasks.check_escalation_finished.check_personal_notifications_task") as mock_check_notif: + check_escalation_finished_task() + + for alert_group in alert_groups: + mock_check_notif.apply_async.assert_any_call((alert_group.id,)) + check_personal_notifications_task(alert_group.id) + if alert_group == alert_group3: + assert f"Alert group {alert_group3.id} has (1) uncompleted personal notifications" in caplog.text + else: + assert f"Alert group {alert_group.id} personal notifications check passed" in caplog.text + + mocked_send_alert_group_escalation_auditor_task_heartbeat.assert_called() diff --git a/engine/apps/alerts/tests/test_notify_user.py b/engine/apps/alerts/tests/test_notify_user.py index 98d8bc2b..4643f0c2 100644 --- a/engine/apps/alerts/tests/test_notify_user.py +++ b/engine/apps/alerts/tests/test_notify_user.py @@ -240,3 +240,48 @@ def test_perform_notification_reason_to_skip_escalation_in_slack( else: mocked_send_slack_notification.assert_called() assert last_log_record.type != UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED + + +@pytest.mark.django_db +def test_perform_notification_slack_prevent_posting( + make_organization, + make_slack_team_identity, + make_user, + make_user_notification_policy, + make_alert_receive_channel, + make_alert_group, + make_user_notification_policy_log_record, + make_slack_message, +): + organization = make_organization() + 
slack_team_identity = make_slack_team_identity() + organization.slack_team_identity = slack_team_identity + organization.save() + user = make_user(organization=organization) + user_notification_policy = make_user_notification_policy( + user=user, + step=UserNotificationPolicy.Step.NOTIFY, + notify_by=UserNotificationPolicy.NotificationChannel.SLACK, + ) + alert_receive_channel = make_alert_receive_channel(organization=organization) + alert_group = make_alert_group(alert_receive_channel=alert_receive_channel) + log_record = make_user_notification_policy_log_record( + author=user, + alert_group=alert_group, + notification_policy=user_notification_policy, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED, + slack_prevent_posting=True, + ) + make_slack_message(alert_group=alert_group, channel_id="test_channel_id", slack_id="test_slack_id") + + with patch.object(SlackMessage, "send_slack_notification") as mocked_send_slack_notification: + perform_notification(log_record.pk) + + mocked_send_slack_notification.assert_not_called() + last_log_record = UserNotificationPolicyLogRecord.objects.last() + assert last_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED + assert last_log_record.reason == "Prevented from posting in Slack" + assert ( + last_log_record.notification_error_code + == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_POSTING_TO_SLACK_IS_DISABLED + ) diff --git a/engine/settings/celery_task_routes.py b/engine/settings/celery_task_routes.py index a1f45836..2252733e 100644 --- a/engine/settings/celery_task_routes.py +++ b/engine/settings/celery_task_routes.py @@ -122,6 +122,7 @@ CELERY_TASK_ROUTES = { "apps.alerts.tasks.alert_group_web_title_cache.update_web_title_cache_for_alert_receive_channel": {"queue": "long"}, "apps.alerts.tasks.alert_group_web_title_cache.update_web_title_cache": {"queue": "long"}, "apps.alerts.tasks.check_escalation_finished.check_escalation_finished_task": {"queue": 
"long"}, + "apps.alerts.tasks.check_escalation_finished.check_personal_notifications_task": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.cleanup_organization_async": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.start_cleanup_deleted_organizations": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.start_sync_organizations": {"queue": "long"}, From 0421bc472a4e9e8e56cf1d8f93e5316586bde294 Mon Sep 17 00:00:00 2001 From: Yulya Artyukhina Date: Tue, 19 Dec 2023 07:05:57 +0100 Subject: [PATCH 08/16] Fix posting slack message about ratelimits (#3582) # What this PR does ## Which issue(s) this PR fixes https://github.com/grafana/oncall-private/issues/2374 ## Checklist - [ ] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- CHANGELOG.md | 1 + engine/apps/integrations/tasks.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0b487ef..29d012db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Check reason to skip notification in Slack to avoid task perform_notification retries @Ferril ([#3562](https://github.com/grafana/oncall/pull/3562)) - Fix alert group table columns validation @Ferril ([#3577](https://github.com/grafana/oncall/pull/3577)) +- Fix posting message about rate limit to Slack @Ferril ([#3582](https://github.com/grafana/oncall/pull/3582)) ## v1.3.80 (2023-12-14) diff --git a/engine/apps/integrations/tasks.py b/engine/apps/integrations/tasks.py index 476e9c58..4ce84c2e 100644 --- a/engine/apps/integrations/tasks.py +++ b/engine/apps/integrations/tasks.py @@ -169,6 +169,6 @@ def notify_about_integration_ratelimit_in_slack(organization_id, text, **kwargs) if slack_team_identity is not None: try: sc = 
SlackClient(slack_team_identity, enable_ratelimit_retry=True) - sc.chat_postMessage(channel=organization.general_log_channel_id, text=text, team=slack_team_identity) + sc.chat_postMessage(channel=organization.general_log_channel_id, text=text) except SlackAPIError as e: logger.warning(f"Slack exception {e} while sending message for organization {organization_id}") From 006682d0b76c2b5807b11027baaa314bf8472b32 Mon Sep 17 00:00:00 2001 From: Joey Orlando Date: Tue, 19 Dec 2023 09:13:07 -0500 Subject: [PATCH 09/16] fix PUT /api/v1/escalation_policies/ issue related to updating from_time and to_time (#3581) # Which issue(s) this PR fixes Closes https://github.com/grafana/oncall-private/issues/2373 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [ ] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- CHANGELOG.md | 4 ++ .../serializers/escalation_policies.py | 11 +++-- .../tests/test_escalation_policies.py | 38 ++++++++++++++++ engine/common/api_helpers/custom_fields.py | 21 --------- .../escalationChains/escalationPolicy.test.ts | 44 ++++++++++++++++++- .../e2e-tests/utils/escalationChain.ts | 3 +- .../src/components/TimeRange/TimeRange.tsx | 12 +++-- 7 files changed, 103 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29d012db..eb8c14d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add backend for multi-stack support for mobile-app @Ferril ([#3500](https://github.com/grafana/oncall/pull/3500)) +### Fixed + +- Fix PUT /api/v1/escalation_policies/id issue when updating `from_time` and `to_time` by @joeyorlando ([#3581](https://github.com/grafana/oncall/pull/3581)) + ## v1.3.78 (2023-12-12) ### Changed diff --git a/engine/apps/public_api/serializers/escalation_policies.py 
b/engine/apps/public_api/serializers/escalation_policies.py index 35c2b616..a100d6fb 100644 --- a/engine/apps/public_api/serializers/escalation_policies.py +++ b/engine/apps/public_api/serializers/escalation_policies.py @@ -10,7 +10,6 @@ from apps.slack.models import SlackUserGroup from apps.user_management.models import User from apps.webhooks.models import Webhook from common.api_helpers.custom_fields import ( - CustomTimeField, OrganizationFilteredPrimaryKeyRelatedField, UsersFilteredByOrganizationField, ) @@ -78,8 +77,14 @@ class EscalationPolicySerializer(EagerLoadingMixin, OrderedModelSerializer): source="custom_webhook", ) important = serializers.BooleanField(required=False) - notify_if_time_from = CustomTimeField(required=False, source="from_time") - notify_if_time_to = CustomTimeField(required=False, source="to_time") + + TIME_FORMAT = "%H:%M:%SZ" + notify_if_time_from = serializers.TimeField( + required=False, source="from_time", format=TIME_FORMAT, input_formats=[TIME_FORMAT] + ) + notify_if_time_to = serializers.TimeField( + required=False, source="to_time", format=TIME_FORMAT, input_formats=[TIME_FORMAT] + ) class Meta: model = EscalationPolicy diff --git a/engine/apps/public_api/tests/test_escalation_policies.py b/engine/apps/public_api/tests/test_escalation_policies.py index 1a9a19c2..a04a2440 100644 --- a/engine/apps/public_api/tests/test_escalation_policies.py +++ b/engine/apps/public_api/tests/test_escalation_policies.py @@ -418,3 +418,41 @@ def test_update_escalation_policy_using_button_to_webhook( assert response.data == serializer.data # step is migrated assert escalation_policy.step == EscalationPolicy.STEP_TRIGGER_CUSTOM_WEBHOOK + + +@pytest.mark.django_db +@pytest.mark.parametrize( + "value,expected_status", + [ + (5, status.HTTP_400_BAD_REQUEST), + ("5", status.HTTP_400_BAD_REQUEST), + ("5:00", status.HTTP_400_BAD_REQUEST), + ("05:00:00", status.HTTP_400_BAD_REQUEST), + ("05:00:00Z", status.HTTP_200_OK), + ], +) +def 
test_update_escalation_policy_from_and_to_time( + make_organization_and_user_with_token, + make_escalation_chain, + make_escalation_policy, + value, + expected_status, +): + organization, _, token = make_organization_and_user_with_token() + escalation_chain = make_escalation_chain(organization) + escalation_policy = make_escalation_policy(escalation_chain, EscalationPolicy.STEP_NOTIFY_IF_TIME) + + client = APIClient() + url = reverse("api-public:escalation_policies-detail", kwargs={"pk": escalation_policy.public_primary_key}) + + for field in ["notify_if_time_from", "notify_if_time_to"]: + response = client.put(url, data={field: value}, format="json", HTTP_AUTHORIZATION=token) + + assert response.status_code == expected_status + + if expected_status == status.HTTP_200_OK: + escalation_policy = EscalationPolicy.objects.get(public_primary_key=response.data["id"]) + serializer = EscalationPolicySerializer(escalation_policy) + assert response.data == serializer.data + else: + assert response.json()[field][0] == "Time has wrong format. Use one of these formats instead: hh:mm:ssZ." 
diff --git a/engine/common/api_helpers/custom_fields.py b/engine/common/api_helpers/custom_fields.py index 32f5736b..dcfc1bb8 100644 --- a/engine/common/api_helpers/custom_fields.py +++ b/engine/common/api_helpers/custom_fields.py @@ -1,5 +1,3 @@ -import time - from django.core.exceptions import ObjectDoesNotExist from rest_framework import fields, serializers from rest_framework.exceptions import ValidationError @@ -102,25 +100,6 @@ class UsersFilteredByOrganizationField(serializers.Field): return queryset.filter(organization=request.user.organization, public_primary_key__in=data).distinct() -class CustomTimeField(fields.TimeField): - def to_representation(self, value): - result = super().to_representation(value) - if result[-1] != "Z": - result += "Z" - return result - - def to_internal_value(self, data): - TIME_FORMAT_LEN = len("00:00:00Z") - if len(data) == TIME_FORMAT_LEN: - try: - time.strptime(data, "%H:%M:%SZ") - except ValueError: - raise BadRequest(detail="Invalid time format, should be '00:00:00Z'") - else: - raise BadRequest(detail="Invalid time format, should be '00:00:00Z'") - return data - - class RouteIdField(fields.CharField): def to_internal_value(self, data): try: diff --git a/grafana-plugin/e2e-tests/escalationChains/escalationPolicy.test.ts b/grafana-plugin/e2e-tests/escalationChains/escalationPolicy.test.ts index 70b10b3e..ecf44db0 100644 --- a/grafana-plugin/e2e-tests/escalationChains/escalationPolicy.test.ts +++ b/grafana-plugin/e2e-tests/escalationChains/escalationPolicy.test.ts @@ -1,4 +1,4 @@ -import { expect, test } from '../fixtures'; +import { Locator, expect, test } from '../fixtures'; import { createEscalationChain, EscalationStep, selectEscalationStepValue } from '../utils/escalationChain'; import { generateRandomValue } from '../utils/forms'; @@ -15,3 +15,45 @@ test('escalation policy does not go back to "Default" after adding users to noti await page.reload(); await expect(page.getByText('Important')).toBeVisible(); }); + +// 
TODO: unskip when https://github.com/grafana/oncall/issues/3585 is patched +test.skip('from_time and to_time for "Continue escalation if current UTC time is in range" escalation step type can be properly updated', async ({ + adminRolePage, +}) => { + const FROM_TIME = '10:31'; + const TO_TIME = '10:32'; + + const { page } = adminRolePage; + const escalationChainName = generateRandomValue(); + + // create escalation step w/ Continue escalation if current UTC time is in policy step + await createEscalationChain(page, escalationChainName, EscalationStep.ContinueEscalationIfCurrentUTCTimeIsIn); + + const _getFromTimeInput = () => page.locator('[data-testid="time-range-from"] >> input'); + const _getToTimeInput = () => page.locator('[data-testid="time-range-to"] >> input'); + + const clickAndInputValue = async (locator: Locator, value: string) => { + // the first click opens up dropdown which contains the time selector scrollable lists + await locator.click(); + + // the second click focuses on the input where we can actually type the time instead, much easier + const actualInput = page.locator('input[class="rc-time-picker-panel-input"]'); + await actualInput.click(); + await actualInput.selectText(); + await actualInput.fill(value); + + // click anywhere to close the dropdown + await page.click('body'); + }; + + // update from and to time values + await clickAndInputValue(_getFromTimeInput(), FROM_TIME); + await clickAndInputValue(_getToTimeInput(), TO_TIME); + + // reload and check that these values have been persisted + await page.reload(); + await page.waitForLoadState('networkidle'); + + expect(await _getFromTimeInput().textContent()).toBe(FROM_TIME); + expect(await _getToTimeInput().textContent()).toBe(FROM_TIME); +}); diff --git a/grafana-plugin/e2e-tests/utils/escalationChain.ts b/grafana-plugin/e2e-tests/utils/escalationChain.ts index 74115d43..c7e5c772 100644 --- a/grafana-plugin/e2e-tests/utils/escalationChain.ts +++ 
b/grafana-plugin/e2e-tests/utils/escalationChain.ts @@ -6,9 +6,10 @@ import { goToOnCallPage } from './navigation'; export enum EscalationStep { NotifyUsers = 'Notify users', NotifyUsersFromOnCallSchedule = 'Notify users from on-call schedule', + ContinueEscalationIfCurrentUTCTimeIsIn = 'Continue escalation if current UTC time is in range', } -const escalationStepValuePlaceholder: Record = { +const escalationStepValuePlaceholder: Partial> = { [EscalationStep.NotifyUsers]: 'Select User', [EscalationStep.NotifyUsersFromOnCallSchedule]: 'Select Schedule', }; diff --git a/grafana-plugin/src/components/TimeRange/TimeRange.tsx b/grafana-plugin/src/components/TimeRange/TimeRange.tsx index b48e7435..28736b83 100644 --- a/grafana-plugin/src/components/TimeRange/TimeRange.tsx +++ b/grafana-plugin/src/components/TimeRange/TimeRange.tsx @@ -92,11 +92,15 @@ const TimeRange = (props: TimeRangeProps) => { return (
- {/* @ts-ignore actually TimeOfDayPicker uses Moment objects */} - +
+ {/* @ts-ignore actually TimeOfDayPicker uses Moment objects */} + +
to - {/* @ts-ignore actually TimeOfDayPicker uses Moment objects */} - +
+ {/* @ts-ignore actually TimeOfDayPicker uses Moment objects */} + +
{showNextDayTip && 'next day'}
From 647d46294c0a9b6e5f5b35e8d931d4f7e0ec0254 Mon Sep 17 00:00:00 2001 From: Yulya Artyukhina Date: Wed, 20 Dec 2023 14:40:47 +0100 Subject: [PATCH 10/16] Fix inbound email integration endpoint (#3586) # What this PR does Handle exception on parsing sender email address from email message for inbound email integration endpoint ## Which issue(s) this PR fixes https://github.com/grafana/oncall-private/issues/2398 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- CHANGELOG.md | 1 + engine/apps/email/inbound.py | 16 ++++++++++++++-- engine/apps/email/tests/test_inbound_email.py | 18 ++++++++++++++++++ 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb8c14d7..d9d25cc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Check reason to skip notification in Slack to avoid task perform_notification retries @Ferril ([#3562](https://github.com/grafana/oncall/pull/3562)) - Fix alert group table columns validation @Ferril ([#3577](https://github.com/grafana/oncall/pull/3577)) - Fix posting message about rate limit to Slack @Ferril ([#3582](https://github.com/grafana/oncall/pull/3582)) +- Fix issue with parsing sender email address from email message for inbound email integration endpoint @Ferril ([#3586](https://github.com/grafana/oncall/pull/3586)) ## v1.3.80 (2023-12-14) diff --git a/engine/apps/email/inbound.py b/engine/apps/email/inbound.py index baa89905..5a37daa3 100644 --- a/engine/apps/email/inbound.py +++ b/engine/apps/email/inbound.py @@ -1,7 +1,7 @@ import logging from typing import Optional, TypedDict -from anymail.exceptions import AnymailWebhookValidationFailure +from anymail.exceptions import AnymailInvalidAddress, 
AnymailWebhookValidationFailure from anymail.inbound import AnymailInboundMessage from anymail.signals import AnymailInboundEvent from anymail.webhooks import amazon_ses, mailgun, mailjet, mandrill, postal, postmark, sendgrid, sparkpost @@ -140,6 +140,18 @@ class InboundEmailWebhookView(AlertChannelDefiningMixin, APIView): subject = subject.strip() message = email.text or "" message = message.strip() - sender = email.from_email.addr_spec + sender = self.get_sender_from_email_message(email) return {"subject": subject, "message": message, "sender": sender} + + def get_sender_from_email_message(self, email: AnymailInboundMessage) -> str: + try: + sender = email.from_email.addr_spec + except AnymailInvalidAddress as e: + # wasn't able to parse email address from message, return raw value from "From" header + logger.warning( + f"get_sender_from_email_message: issue during parsing sender from email message, getting raw value " + f"instead. Exception: {e}" + ) + sender = ", ".join(email.get_all("From")) + return sender diff --git a/engine/apps/email/tests/test_inbound_email.py b/engine/apps/email/tests/test_inbound_email.py index 91951139..a47b19f9 100644 --- a/engine/apps/email/tests/test_inbound_email.py +++ b/engine/apps/email/tests/test_inbound_email.py @@ -1,10 +1,13 @@ import json import pytest +from anymail.inbound import AnymailInboundMessage from django.urls import reverse from rest_framework import status from rest_framework.test import APIClient +from apps.email.inbound import InboundEmailWebhookView + @pytest.mark.django_db def test_amazon_ses_provider_load(settings, make_organization_and_user_with_token, make_alert_receive_channel): @@ -52,3 +55,18 @@ def test_amazon_ses_provider_load(settings, make_organization_and_user_with_toke ) assert response.status_code == status.HTTP_200_OK + + +@pytest.mark.parametrize( + "sender_value,expected_result", + [ + ("'Alex Smith' ", "test@example.com"), + ("'Alex Smith' via [TEST] mail ", "'Alex Smith' via [TEST] mail "), 
+ ], +) +def test_get_sender_from_email_message(sender_value, expected_result): + email = AnymailInboundMessage() + email["From"] = sender_value + view = InboundEmailWebhookView() + result = view.get_sender_from_email_message(email) + assert result == expected_result From bcbca9d3b9e364dc8706c6672709298e23060c92 Mon Sep 17 00:00:00 2001 From: Ravishankar Date: Wed, 20 Dec 2023 19:19:50 +0530 Subject: [PATCH 11/16] fix(3564) Support PATCH Method In Outgoing webhook (#3580) # What this PR does Adds PATCH method Support for outgoing webhook ## Which issue(s) this PR fixes Fixes #3564 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --------- Co-authored-by: Joey Orlando --- CHANGELOG.md | 1 + engine/apps/api/tests/test_webhooks.py | 10 ++++++++-- engine/apps/webhooks/models/webhook.py | 6 ++++-- engine/apps/webhooks/tests/test_webhook.py | 2 +- 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9d25cc4..186e4e79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Support e2e tests in Tilt and Makefile ([#3516](https://github.com/grafana/oncall/pull/3516)) +- Support PATCH method for outgoing webhooks by @ravishankar15 ([#3580](https://github.com/grafana/oncall/pull/3580)) ### Changed diff --git a/engine/apps/api/tests/test_webhooks.py b/engine/apps/api/tests/test_webhooks.py index 74949b2a..278f7870 100644 --- a/engine/apps/api/tests/test_webhooks.py +++ b/engine/apps/api/tests/test_webhooks.py @@ -701,7 +701,10 @@ def test_create_invalid_missing_fields(webhook_internal_api_setup, make_user_aut } response = client.post(url, data, format="json", **make_user_auth_headers(user, token)) assert response.status_code == 
status.HTTP_400_BAD_REQUEST - assert response.json()["http_method"][0] == "This field must be one of ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS']." + assert ( + response.json()["http_method"][0] + == "This field must be one of ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH']." + ) data = { "name": "test webhook 3", @@ -711,7 +714,10 @@ def test_create_invalid_missing_fields(webhook_internal_api_setup, make_user_aut } response = client.post(url, data, format="json", **make_user_auth_headers(user, token)) assert response.status_code == status.HTTP_400_BAD_REQUEST - assert response.json()["http_method"][0] == "This field must be one of ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS']." + assert ( + response.json()["http_method"][0] + == "This field must be one of ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH']." + ) data = {"name": "test webhook 3", "url": TEST_URL, "trigger_type": 2000000, "http_method": "POST"} response = client.post(url, data, format="json", **make_user_auth_headers(user, token)) diff --git a/engine/apps/webhooks/models/webhook.py b/engine/apps/webhooks/models/webhook.py index 1219d923..425fcda3 100644 --- a/engine/apps/webhooks/models/webhook.py +++ b/engine/apps/webhooks/models/webhook.py @@ -32,7 +32,7 @@ if typing.TYPE_CHECKING: from apps.alerts.models import EscalationPolicy WEBHOOK_FIELD_PLACEHOLDER = "****************" -PUBLIC_WEBHOOK_HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "OPTIONS"] +PUBLIC_WEBHOOK_HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"] logger = get_task_logger(__name__) logger.setLevel(logging.DEBUG) @@ -186,7 +186,7 @@ class Webhook(models.Model): if self.authorization_header: request_kwargs["headers"]["Authorization"] = self.authorization_header - if self.http_method in ["POST", "PUT"]: + if self.http_method in ["POST", "PUT", "PATCH"]: if self.forward_all: request_kwargs["json"] = event_data if self.is_legacy: @@ -255,6 +255,8 @@ class Webhook(models.Model): r = requests.delete(url, 
timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs) elif self.http_method == "OPTIONS": r = requests.options(url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs) + elif self.http_method == "PATCH": + r = requests.patch(url, timeout=OUTGOING_WEBHOOK_TIMEOUT, **request_kwargs) else: raise ValueError(f"Unsupported http method: {self.http_method}") return r diff --git a/engine/apps/webhooks/tests/test_webhook.py b/engine/apps/webhooks/tests/test_webhook.py index 4c8cb69a..a86dd3b2 100644 --- a/engine/apps/webhooks/tests/test_webhook.py +++ b/engine/apps/webhooks/tests/test_webhook.py @@ -231,7 +231,7 @@ def test_make_request(make_organization, make_custom_webhook): organization = make_organization() with patch("apps.webhooks.models.webhook.requests") as mock_requests: - for method in ("GET", "POST", "PUT", "DELETE", "OPTIONS"): + for method in ("GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"): webhook = make_custom_webhook(organization=organization, http_method=method) webhook.make_request("url", {"foo": "bar"}) expected_call = getattr(mock_requests, method.lower()) From 9657533b5b5f05c28cede2e17ae12684e694a2aa Mon Sep 17 00:00:00 2001 From: Joey Orlando Date: Fri, 22 Dec 2023 07:36:54 -0500 Subject: [PATCH 12/16] fix duplicate teams showing up in teams dropdown for `/escalate` slack command (#3590) # Which issue(s) this PR fixes - Closes https://github.com/grafana/support-escalations/issues/8763 - Closes https://github.com/grafana/oncall/issues/3388 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [ ] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- CHANGELOG.md | 7 ++--- .../tests/test_scenario_steps/test_paging.py | 30 +++++++++++++++---- .../user_management/models/organization.py | 24 ++++++++------- .../tests/test_organization.py | 9 ++++++ 4 files changed, 50 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 186e4e79..e2b3eef5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fix alert group table columns validation @Ferril ([#3577](https://github.com/grafana/oncall/pull/3577)) - Fix posting message about rate limit to Slack @Ferril ([#3582](https://github.com/grafana/oncall/pull/3582)) - Fix issue with parsing sender email address from email message for inbound email integration endpoint @Ferril ([#3586](https://github.com/grafana/oncall/pull/3586)) +- Fix PUT /api/v1/escalation_policies/id issue when updating `from_time` and `to_time` by @joeyorlando ([#3581](https://github.com/grafana/oncall/pull/3581)) +- Fix issue where duplicate team options would show up in the teams dropdown for the `/escalate` Slack command + by @joeyorlando ([#3590](https://github.com/grafana/oncall/pull/3590)) ## v1.3.80 (2023-12-14) @@ -35,10 +38,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add backend for multi-stack support for mobile-app @Ferril ([#3500](https://github.com/grafana/oncall/pull/3500)) -### Fixed - -- Fix PUT /api/v1/escalation_policies/id issue when updating `from_time` and `to_time` by @joeyorlando ([#3581](https://github.com/grafana/oncall/pull/3581)) - ## v1.3.78 (2023-12-12) ### Changed diff --git a/engine/apps/slack/tests/test_scenario_steps/test_paging.py b/engine/apps/slack/tests/test_scenario_steps/test_paging.py index 64c0c29e..d665328a 100644 --- a/engine/apps/slack/tests/test_scenario_steps/test_paging.py +++ b/engine/apps/slack/tests/test_scenario_steps/test_paging.py @@ -284,6 +284,9 @@ def test_get_team_select_blocks( input_id_prefix = "nmxcnvmnxv" + def _contstruct_team_option(team): + return {"text": {"emoji": True, "text": team.name, "type": "plain_text"}, "value": str(team.pk)} + # no team selected - no team direct paging integrations available organization, _, _, slack_user_identity = 
make_organization_and_user_with_slack_identities() blocks = _get_team_select_blocks(slack_user_identity, organization, False, None, input_id_prefix) @@ -309,11 +312,9 @@ def test_get_team_select_blocks( assert len(blocks) == 2 input_block, context_block = blocks - team_option = {"text": {"emoji": True, "text": team.name, "type": "plain_text"}, "value": str(team.pk)} - assert input_block["type"] == "input" assert len(input_block["element"]["options"]) == 1 - assert input_block["element"]["options"] == [team_option] + assert input_block["element"]["options"] == [_contstruct_team_option(team)] assert context_block["elements"][0]["text"] == info_msg # team selected @@ -337,9 +338,6 @@ def test_get_team_select_blocks( assert len(blocks) == 2 input_block, context_block = blocks - def _contstruct_team_option(team): - return {"text": {"emoji": True, "text": team.name, "type": "plain_text"}, "value": str(team.pk)} - team1_option = _contstruct_team_option(team1) team2_option = _contstruct_team_option(team2) @@ -355,3 +353,23 @@ def test_get_team_select_blocks( context_block["elements"][0]["text"] == f"Integration <{team2_direct_paging_arc.web_link}|{team2_direct_paging_arc.verbal_name}> will be used for notification." 
) + + # team's direct paging integration has two routes associated with it + # the team should only be displayed once + organization, _, _, slack_user_identity = make_organization_and_user_with_slack_identities() + team = make_team(organization) + + arc = make_alert_receive_channel(organization, team=team, integration=AlertReceiveChannel.INTEGRATION_DIRECT_PAGING) + escalation_chain = make_escalation_chain(organization) + make_channel_filter(arc, is_default=True, escalation_chain=escalation_chain) + make_channel_filter(arc, escalation_chain=escalation_chain) + + blocks = _get_team_select_blocks(slack_user_identity, organization, False, None, input_id_prefix) + + assert len(blocks) == 2 + input_block, context_block = blocks + + assert input_block["type"] == "input" + assert len(input_block["element"]["options"]) == 1 + assert input_block["element"]["options"] == [_contstruct_team_option(team)] + assert context_block["elements"][0]["text"] == info_msg diff --git a/engine/apps/user_management/models/organization.py b/engine/apps/user_management/models/organization.py index 9d550ab2..ef4bb8d9 100644 --- a/engine/apps/user_management/models/organization.py +++ b/engine/apps/user_management/models/organization.py @@ -323,16 +323,20 @@ class Organization(MaintainableObject): """ from apps.alerts.models import AlertReceiveChannel - return self.alert_receive_channels.annotate( - num_channel_filters=Count("channel_filters"), - # used to determine if the organization has telegram configured - num_org_telegram_channels=Count("organization__telegram_channel"), - ).filter( - Q(num_channel_filters__gt=1) - | (Q(organization__slack_team_identity__isnull=False) | Q(num_org_telegram_channels__gt=0)) - | Q(channel_filters__is_default=True, channel_filters__escalation_chain__isnull=False) - | Q(channel_filters__is_default=True, channel_filters__notification_backends__isnull=False), - integration=AlertReceiveChannel.INTEGRATION_DIRECT_PAGING, + return ( + 
self.alert_receive_channels.annotate( + num_channel_filters=Count("channel_filters"), + # used to determine if the organization has telegram configured + num_org_telegram_channels=Count("organization__telegram_channel"), + ) + .filter( + Q(num_channel_filters__gt=1) + | (Q(organization__slack_team_identity__isnull=False) | Q(num_org_telegram_channels__gt=0)) + | Q(channel_filters__is_default=True, channel_filters__escalation_chain__isnull=False) + | Q(channel_filters__is_default=True, channel_filters__notification_backends__isnull=False), + integration=AlertReceiveChannel.INTEGRATION_DIRECT_PAGING, + ) + .distinct() ) @property diff --git a/engine/apps/user_management/tests/test_organization.py b/engine/apps/user_management/tests/test_organization.py index 91915500..58636f88 100644 --- a/engine/apps/user_management/tests/test_organization.py +++ b/engine/apps/user_management/tests/test_organization.py @@ -217,6 +217,7 @@ def test_get_notifiable_direct_paging_integrations( assert arc in notifiable_direct_paging_integrations else: assert arc not in notifiable_direct_paging_integrations + return notifiable_direct_paging_integrations # integration has no default channel filter org, arc = _make_org_and_arc() @@ -269,3 +270,11 @@ def test_get_notifiable_direct_paging_integrations( escalation_chain = make_escalation_chain(org) make_channel_filter(arc, is_default=True, notify_in_slack=False, escalation_chain=escalation_chain) _assert(org, arc) + + # integration has more than one channel filter associated with it, nevertheless the integration should only + # be returned once + org, arc = _make_org_and_arc() + make_channel_filter(arc, is_default=True) + make_channel_filter(arc, is_default=False) + notifiable_direct_paging_integrations = _assert(org, arc) + assert notifiable_direct_paging_integrations.count() == 1 From 83d7f3368d76f9b8b78c09a9b98da367c72c43c4 Mon Sep 17 00:00:00 2001 From: Maxim Mordasov Date: Thu, 28 Dec 2023 16:58:35 +0300 Subject: [PATCH 13/16] Hide "Limit 
each shift length" control for month-based rotations (#3599) # What this PR does Hide "Limit each shift length" control for month-based rotations ## Which issue(s) this PR fixes https://github.com/grafana/support-escalations/issues/8874 ## Checklist - [ ] Unit, integration, and e2e (if applicable) tests updated - [ ] Documentation added (or `pr:no public docs` PR label added if not required) - [ ] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- grafana-plugin/src/containers/RotationForm/RotationForm.tsx | 1 + 1 file changed, 1 insertion(+) diff --git a/grafana-plugin/src/containers/RotationForm/RotationForm.tsx b/grafana-plugin/src/containers/RotationForm/RotationForm.tsx index a2810be9..6746fdd4 100644 --- a/grafana-plugin/src/containers/RotationForm/RotationForm.tsx +++ b/grafana-plugin/src/containers/RotationForm/RotationForm.tsx @@ -388,6 +388,7 @@ const RotationForm = observer((props: RotationFormProps) => { setShowActiveOnSelectedDays(Boolean(shift.by_day?.length)); const activeOnSelectedPartOfDay = + shift.frequency !== RepeatEveryPeriod.MONTHS && repeatEveryInSeconds(shift.frequency, shift.interval) !== shiftEnd.diff(shiftStart, 'seconds'); setShowActiveOnSelectedPartOfDay(activeOnSelectedPartOfDay); From da47c029901fe599356f310f479abba0e988f0ff Mon Sep 17 00:00:00 2001 From: Joey Orlando Date: Thu, 28 Dec 2023 09:14:43 -0500 Subject: [PATCH 14/16] use forked version of redis-py which adds extra debug logging (#3600) # Which issue(s) this PR fixes This helps with debugging https://github.com/grafana/oncall-private/issues/2406 (**note**: it doesn't fix it) ## Checklist - [ ] Unit, integration, and e2e (if applicable) tests updated (N/A) - [ ] Documentation added (or `pr:no public docs` PR label added if not required) - [ ] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) --- engine/requirements.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/engine/requirements.txt 
b/engine/requirements.txt index b182216f..2069ce83 100644 --- a/engine/requirements.txt +++ b/engine/requirements.txt @@ -5,7 +5,10 @@ whitenoise==5.3.0 twilio~=6.37.0 phonenumbers==8.10.0 celery[amqp,redis]==5.3.1 -redis==5.0.1 +# NOTE: temporarily installing a forked version of redis-py which adds some more debug logging +# in an effort to fix https://github.com/grafana/oncall-private/issues/2406 +# revert this change once done debugging +git+https://github.com/grafana/redis-py@c0f167c humanize==0.5.1 uwsgi==2.0.21 django-cors-headers==3.7.0 From 955261ffc08afe5978982cfb9a238ff7af92f78b Mon Sep 17 00:00:00 2001 From: Joey Orlando Date: Thu, 28 Dec 2023 09:35:37 -0500 Subject: [PATCH 15/16] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e2b3eef5..cf1ab849 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +## v1.3.80 (2023-12-28) + ### Added - Support e2e tests in Tilt and Makefile ([#3516](https://github.com/grafana/oncall/pull/3516)) From 33fcb87c5166c6ca16d6e1f5ccbc7928be2142e9 Mon Sep 17 00:00:00 2001 From: Joey Orlando Date: Thu, 28 Dec 2023 09:36:26 -0500 Subject: [PATCH 16/16] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf1ab849..a34eb196 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased -## v1.3.80 (2023-12-28) +## v1.3.81 (2023-12-28) ### Added