diff --git a/.dockerignore b/.dockerignore index e541b3d4..6561a0ad 100644 --- a/.dockerignore +++ b/.dockerignore @@ -5,4 +5,5 @@ frontend/node_modules frontend/build package-lock.json ./engine/extensions -.env \ No newline at end of file +.env +.env-hobby diff --git a/.drone.yml b/.drone.yml index 4fc14748..3fd48008 100644 --- a/.drone.yml +++ b/.drone.yml @@ -1,3 +1,4 @@ +--- kind: pipeline type: docker name: Build and Release @@ -30,8 +31,8 @@ steps: - yarn ci-build:finish - yarn ci-package - cd ci/dist - - zip -r grafana-oncall-app-${DRONE_BRANCH}-${DRONE_BUILD_NUMBER}.zip ./grafana-oncall-app - - if [ -z "$DRONE_TAG" ]; then echo "No tag, skipping archive"; else cp grafana-oncall-app-${DRONE_BRANCH}-${DRONE_BUILD_NUMBER}.zip grafana-oncall-app-${DRONE_TAG}.zip; fi + - zip -r grafana-oncall-app.zip ./grafana-oncall-app + - if [ -z "$DRONE_TAG" ]; then echo "No tag, skipping archive"; else cp grafana-oncall-app.zip grafana-oncall-app-${DRONE_TAG}.zip; fi - name: Publish Plugin to GCS (release) image: plugins/gcs @@ -156,8 +157,13 @@ services: trigger: event: - - push + include: - tag + - push + ref: + include: + - refs/heads/** + - refs/tags/v*.*.* --- # Secret for pulling docker images. @@ -227,4 +233,9 @@ get: name: machine-user-token path: infra/data/ci/drone kind: secret -name: drone_token \ No newline at end of file +name: drone_token +--- +kind: signature +hmac: 5cdafa5ca416acb1763d1d9ac93bbd932982c874718f40af533914a6711c1a1f + +... 
diff --git a/.env.example b/.env.example index 6be42e97..b8794c10 100644 --- a/.env.example +++ b/.env.example @@ -21,7 +21,7 @@ DJANGO_SETTINGS_MODULE=settings.dev SECRET_KEY=jkashdkjashdkjh BASE_URL=http://localhost:8000 -FEATURE_TELEGRAM_INTEGRATION_ENABLED= +FEATURE_TELEGRAM_INTEGRATION_ENABLED=True FEATURE_SLACK_INTEGRATION_ENABLED=True FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED= diff --git a/.github/workflows/publish_docs.yml b/.github/workflows/publish_docs.yml index 20189561..2eb7dec4 100644 --- a/.github/workflows/publish_docs.yml +++ b/.github/workflows/publish_docs.yml @@ -17,24 +17,24 @@ jobs: - name: Build Website run: | docker run -v ${PWD}/sources:/hugo/content/docs/amixr --rm grafana/docs-base:latest /bin/bash -c 'make hugo' - sync: - runs-on: ubuntu-latest - needs: test - if: github.ref == 'refs/heads/main' - steps: - - uses: actions/checkout@v1 - - run: git clone --single-branch --no-tags --depth 1 -b master https://grafanabot:${{ secrets.GH_BOT_ACCESS_TOKEN }}@github.com/grafana/website-sync ./.github/actions/website-sync - - name: publish-to-git - uses: ./.github/actions/website-sync - id: publish - with: - repository: grafana/website - branch: master - host: github.com - github_pat: '${{ secrets.GH_BOT_ACCESS_TOKEN }}' - source_folder: docs/sources - target_folder: content/docs/amixr/v0.0.39 - - shell: bash - run: | - test -n "${{ steps.publish.outputs.commit_hash }}" - test -n "${{ steps.publish.outputs.working_directory }}" \ No newline at end of file +# sync: +# runs-on: ubuntu-latest +# needs: test +# if: github.ref == 'refs/heads/main' +# steps: +# - uses: actions/checkout@v1 +# - run: git clone --single-branch --no-tags --depth 1 -b master https://grafanabot:${{ secrets.GH_BOT_ACCESS_TOKEN }}@github.com/grafana/website-sync ./.github/actions/website-sync +# - name: publish-to-git +# uses: ./.github/actions/website-sync +# id: publish +# with: +# repository: grafana/website +# branch: master +# host: github.com +# github_pat: '${{ 
secrets.GH_BOT_ACCESS_TOKEN }}' +# source_folder: docs/sources +# target_folder: content/docs/amixr/v0.0.39 +# - shell: bash +# run: | +# test -n "${{ steps.publish.outputs.commit_hash }}" +# test -n "${{ steps.publish.outputs.working_directory }}" diff --git a/.gitignore b/.gitignore index ae81aab5..b00b88a2 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ *.pyc venv .env +.env_hobby .vscode dump.rdb .idea diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..3d4caa4f --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at conduct@grafana.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/DEVELOPER.md b/DEVELOPER.md index fd4da888..8af642f3 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -28,7 +28,7 @@ 1. Start stateful services (RabbitMQ, Redis, Grafana with mounted plugin folder) ```bash -docker-compose -f developer-docker-compose.yml up -d +docker-compose -f docker-compose-developer.yml up -d ``` 2. Prepare a python environment: @@ -53,13 +53,10 @@ export $(grep -v '^#' .env | xargs -0) # Hint: there is a known issue with uwsgi. It's not used in the local dev environment. Feel free to comment it in `engine/requirements.txt`. 
cd engine && pip install -r requirements.txt -# Create folder for database -mkdir sqlite_data - # Migrate the DB: python manage.py migrate -# Create user for django admin panel: +# Create user for django admin panel (if you need it): python manage.py createsuperuser ``` @@ -69,7 +66,7 @@ python manage.py createsuperuser # Http server: python manage.py runserver -# Worker for background tasks(run it in the parallel terminal, don't forget to export .env there) +# Worker for background tasks (run it in the parallel terminal, don't forget to export .env there) python manage.py start_celery # Additionally you could launch the worker with periodic tasks launcher (99% you don't need this) @@ -107,7 +104,7 @@ python manage.py issue_invite_for_the_frontend --override OnCall API URL: http://host.docker.internal:8000 -OnCall Invitation Token (Single use token to connect Grafana instance): +Invitation Token (Single use token to connect Grafana instance): Response from the invite generator command (check above) Grafana URL (URL OnCall will use to talk to Grafana instance): @@ -119,7 +116,7 @@ host IP from inside the container by running: ```bash /sbin/ip route|awk '/default/ { print $3 }' -# Alternatively add host.docker.internal as an extra_host for grafana in developer-docker-compose.yml +# Alternatively add host.docker.internal as an extra_host for grafana in docker-compose-developer.yml extra_hosts: - "host.docker.internal:host-gateway" @@ -127,161 +124,7 @@ extra_hosts: ### Slack application setup -This instruction is also applicable if you set up self-hosted OnCall. - -1. Start a [localtunnel](https://github.com/localtunnel/localtunnel) reverse proxy to make oncall engine api accessible to slack (if you don't have OnCall backend accessible from https), -```bash -# Choose the unique prefix instead of pretty-turkey-83 -# Localtunnel will generate an url, e.g. 
https://pretty-turkey-83.loca.lt -# it is referred as below -lt --port 8000 -s pretty-turkey-83 --print-requests -``` - -2. [Create a Slack Workspace](https://slack.com/create) for development. - -3. Go to https://api.slack.com/apps and click Create New App button - -4. Select `From an app manifest` option and choose the right workspace - -5. Copy and paste the following block with the correct and fields - -
- Click to expand! - - ```yaml - _metadata: - major_version: 1 - minor_version: 1 - display_information: - name: - features: - app_home: - home_tab_enabled: true - messages_tab_enabled: true - messages_tab_read_only_enabled: false - bot_user: - display_name: - always_online: true - shortcuts: - - name: Create a new incident - type: message - callback_id: incident_create - description: Creates a new OnCall incident - - name: Add to postmortem - type: message - callback_id: add_postmortem - description: Add this message to postmortem - slash_commands: - - command: /oncall - url: /slack/interactive_api_endpoint/ - description: oncall - should_escape: false - oauth_config: - redirect_urls: - - /api/internal/v1/complete/slack-install-free/ - - /api/internal/v1/complete/slack-login/ - scopes: - user: - - channels:read - - chat:write - - identify - - users.profile:read - bot: - - app_mentions:read - - channels:history - - channels:read - - chat:write - - chat:write.customize - - chat:write.public - - commands - - files:write - - groups:history - - groups:read - - im:history - - im:read - - im:write - - mpim:history - - mpim:read - - mpim:write - - reactions:write - - team:read - - usergroups:read - - usergroups:write - - users.profile:read - - users:read - - users:read.email - - users:write - settings: - event_subscriptions: - request_url: /slack/event_api_endpoint/ - bot_events: - - app_home_opened - - app_mention - - channel_archive - - channel_created - - channel_deleted - - channel_rename - - channel_unarchive - - member_joined_channel - - message.channels - - message.im - - subteam_created - - subteam_members_changed - - subteam_updated - - user_change - interactivity: - is_enabled: true - request_url: /slack/interactive_api_endpoint/ - org_deploy_enabled: false - socket_mode_enabled: false - ``` -
- -6. Click `Install to workspace` button to generate the credentials - -6. Populate the environment with variables related to Slack - - In your `.env` file, fill out the following variables: - - ``` - SLACK_CLIENT_OAUTH_ID = Basic Information -> App Credentials -> Client ID - SLACK_CLIENT_OAUTH_SECRET = Basic Information -> App Credentials -> Client Secret - SLACK_API_TOKEN = OAuth & Permissions -> Bot User OAuth Token - SLACK_INSTALL_RETURN_REDIRECT_HOST = https://pretty-turkey-83.loca.lt - ``` - - Don't forget to export variables from the `.env` file and restart the server! - -7. Edit `grafana-plugin/grafana-plugin.yml` to set `onCallApiUrl` fields with localtunnel url - ``` - onCallApiUrl: https://pretty-turkey-83.loca.lt - ``` - - or set BASE_URL Env variable through web interface. - -8. Edit grafana-plugin/src/plugin.json to add `Bypass-Tunnel-Reminder` header section for all existing routes - > this headers required for the local development only, otherwise localtunnel blocks requests from grafana plugin - - ``` - { - "path": ..., - ... - "headers": [ - ... - { - "name": "Bypass-Tunnel-Reminder", - "content": "True" - } - ] - }, - ``` -9. Rebuild the plugin - ``` - yarn watch - ``` -10. Restart grafana instance - -11. All set! Go to Slack and check if your application is functional. +For Slack app configuration check our docs: https://grafana.com/docs/grafana-cloud/oncall/open-source/#slack-setup ## Troubleshooting @@ -383,3 +226,18 @@ pytest --ds=settings.dev - Set Settings to settings/dev.py 5. Create a new Django Server run configuration to Run/Debug the engine - Use a plugin such as EnvFile to load the .env file + +## Update drone build +The .drone.yml build file must be signed when changes are made to it. 
Follow these steps: + +If you have not installed drone CLI follow [these instructions](https://docs.drone.io/cli/install/) + +To sign the .drone.yml file: +```bash +export DRONE_SERVER=https://drone.grafana.net + +# Get your drone token from https://drone.grafana.net/account +export DRONE_TOKEN= + +drone sign --save grafana/oncall .drone.yml +``` diff --git a/GOVERNANCE.md b/GOVERNANCE.md new file mode 100644 index 00000000..6b837d68 --- /dev/null +++ b/GOVERNANCE.md @@ -0,0 +1,159 @@ +--- +title: Governance +--- + +# Governance + +This document describes the rules and governance of the project. It is meant to be followed by all the developers of the project and the OnCall community. Common terminology used in this governance document are listed below: + +- **Team members**: Any members of the private [team mailing list][team]. + +- **Maintainers**: Maintainers lead an individual project or parts thereof ([`MAINTAINERS.md`][maintainers]). + +- **Projects**: A single repository in the Grafana GitHub organization and listed below is referred to as a project: + + - oncall + +- **The OnCall project**: The sum of all activities performed under this governance, concerning one or more repositories or the community. + +## Values + +The OnCall developers and community are expected to follow the values defined in the [Code of Conduct][coc]. Furthermore, the OnCall community strives for kindness, giving feedback effectively, and building a welcoming environment. The OnCall developers generally decide by consensus and only resort to conflict resolution by a majority vote if consensus cannot be reached. + +## Projects + +Each project must have a [`MAINTAINERS.md`][maintainers] file with at least one maintainer. Where a project has a release process, access and documentation should be such that more than one person can perform a release. 
Releases should be announced on the [announcements][https://github.com/grafana/oncall/discussions/categories/announcements] category at the GitHub Discussions. Any new projects should be first proposed on the [team mailing list][team] following the voting procedures listed below. + +## Decision making + +### Team members + +Team member status may be given to those who have made ongoing contributions to the OnCall project for at least 3 months. This is usually in the form of code improvements and/or notable work on documentation, but organizing events or user support could also be taken into account. + +New members may be proposed by any existing member by email to the [team mailing list][team]. It is highly desirable to reach consensus about acceptance of a new member. However, the proposal is ultimately voted on by a formal [supermajority vote](#supermajority-vote). + +If the new member proposal is accepted, the proposed team member should be contacted privately via email to confirm or deny their acceptance of team membership. This email will also be CC'd to the [team mailing list][team] for record-keeping purposes. + +If they choose to accept, the [onboarding](#onboarding) procedure is followed. + +Team members may retire at any time by emailing [the team][team]. + +Team members can be removed by [supermajority vote](#supermajority-vote) on [the team mailing list][team]. +For this vote, the member in question is not eligible to vote and does not count towards the quorum. +Any removal vote can cover only one single person. + +Upon death of a member, they leave the team automatically. + +In case a member leaves, the [offboarding](#offboarding) procedure is applied. 
+ +The current team members are: + +- Ildar Iskhakov — [@iskhakov](https://github.com/iskhakov) ([Grafana Labs](https://grafana.com/)) +- Innokentii Konstantinov — [@Konstantinov-Innokentii](https://github.com/Konstantinov-Innokentii) ([Grafana Labs](https://grafana.com/)) +- Matías Bordese — [@matiasb](https://github.com/matiasb) ([Grafana Labs](https://grafana.com/)) +- Matvey Kukuy — [@Matvey-Kuk](https://github.com/Matvey-Kuk) ([Grafana Labs](https://grafana.com/)) +- Michael Derynck — [@mderynck](https://github.com/mderynck) ([Grafana Labs](https://grafana.com/)) +- Vadim Stepanov — [@vadimkerr](https://github.com/vadimkerr) ([Grafana Labs](https://grafana.com/)) +- Yulia Shanyrova — [@Ukochka](https://github.com/Ukochka) ([Grafana Labs](https://grafana.com/)) +- Maxim Mordasov — [@maskin25](https://github.com/maskin25) ([Grafana Labs](https://grafana.com/)) +- Julia Artyukhina — [@Ferril](https://github.com/Ferril) ([Grafana Labs](https://grafana.com/)) +- Julia Artyukhina — [@Ferril](https://github.com/Ferril) ([Grafana Labs](https://grafana.com/)) + +Previous team members: + +- n/a + +### Maintainers + +Maintainers lead one or more project(s) or parts thereof and serve as a point of conflict resolution amongst the contributors to this project. Ideally, maintainers are also team members, but exceptions are possible for suitable maintainers that, for whatever reason, are not yet team members. + +Changes in maintainership have to be announced on the [announcemount][https://github.com/grafana/oncall/discussions/categories/announcements] category at the GitHub Discussions. They are decided by [rough consensus](#consensus) and formalized by changing the [`MAINTAINERS.md`][maintainers] file of the respective repository. + +Maintainers are granted commit rights to all projects covered by this governance. + +A maintainer or committer may resign by notifying the [team mailing list][team]. 
A maintainer with no project activity for a year is considered to have resigned. Maintainers that wish to resign are encouraged to propose another team member to take over the project. + +A project may have multiple maintainers, as long as the responsibilities are clearly agreed upon between them. This includes coordinating who handles which issues and pull requests. + +### Technical decisions + +Technical decisions that only affect a single project are made informally by the maintainer of this project, and [rough consensus](#consensus) is assumed. Technical decisions that span multiple parts of the project should be discussed and made on the the [GitHub Discussions][https://github.com/grafana/oncall/discussions]. + +Decisions are usually made by [rough consensus](#consensus). If no consensus can be reached, the matter may be resolved by [majority vote](#majority-vote). + +### Governance changes + +Changes to this document are made by Grafana Labs. + +### Other matters + +Any matter that needs a decision may be called to a vote by any member if they deem it necessary. For private or personnel matters, discussion and voting takes place on the [team mailing list][team], otherwise on the [GitHub Discussions][https://github.com/grafana/oncall/discussions]. + +## Voting + +The OnCall project usually runs by informal consensus, however sometimes a formal decision must be made. + +Depending on the subject matter, as laid out [above](#decision-making), different methods of voting are used. + +For all votes, voting must be open for at least one week. The end date should be clearly stated in the call to vote. A vote may be called and closed early if enough votes have come in one way so that further votes cannot change the final decision. + +In all cases, all and only [team members](#team-members) are eligible to vote, with the sole exception of the forced removal of a team member, in which said member is not eligible to vote. 
+ +Discussion and votes on personnel matters (including but not limited to team membership and maintainership) are held in private on the [team mailing list][team]. All other discussion and votes are held in public on the [GitHub Discussions][https://github.com/grafana/oncall/discussions]. + +For public discussions, anyone interested is encouraged to participate. Formal power to object or vote is limited to [team members](#team-members). + +### Consensus + +The default decision making mechanism for the OnCall project is [rough][rough] consensus. This means that any decision on technical issues is considered supported by the [team][team] as long as nobody objects or the objection has been considered but not necessarily accommodated. + +Silence on any consensus decision is implicit agreement and equivalent to explicit agreement. Explicit agreement may be stated at will. Decisions may, but do not need to be called out and put up for decision on the [GitHub Discussions][https://github.com/grafana/oncall/discussions] at any time and by anyone. + +Consensus decisions can never override or go against the spirit of an earlier explicit vote. + +If any [team member](#team-members) raises objections, the team members work together towards a solution that all involved can accept. This solution is again subject to rough consensus. + +In case no consensus can be found, but a decision one way or the other must be made, any [team member](#team-members) may call a formal [majority vote](#majority-vote). + +### Majority vote + +Majority votes must be called explicitly in a separate thread on the appropriate mailing list. The subject must be prefixed with `[VOTE]`. In the body, the call to vote must state the proposal being voted on. It should reference any discussion leading up to this point. + +Votes may take the form of a single proposal, with the option to vote yes or no, or the form of multiple alternatives. 
+ +A vote on a single proposal is considered successful if more vote in favor than against. + +If there are multiple alternatives, members may vote for one or more alternatives, or vote “no” to object to all alternatives. It is not possible to cast an “abstain” vote. A vote on multiple alternatives is considered decided in favor of one alternative if it has received the most votes in favor, and a vote from more than half of those voting. Should no alternative reach this quorum, another vote on a reduced number of options may be called separately. + +### Supermajority vote + +Supermajority votes must be called explicitly in a separate thread on the appropriate mailing list. The subject must be prefixed with `[VOTE]`. In the body, the call to vote must state the proposal being voted on. It should reference any discussion leading up to this point. + +Votes may take the form of a single proposal, with the option to vote yes or no, or the form of multiple alternatives. + +A vote on a single proposal is considered successful if at least two thirds of those eligible to vote vote in favor. + +If there are multiple alternatives, members may vote for one or more alternatives, or vote “no” to object to all alternatives. A vote on multiple alternatives is considered decided in favor of one alternative if it has received the most votes in favor, and a vote from at least two thirds of those eligible to vote. Should no alternative reach this quorum, another vote on a reduced number of options may be called separately. + +## On- / Offboarding + +### Onboarding + +The new member is + +- added to the list of [team members](#team-members). Ideally by sending a PR of their own, at least approving said PR. +- announced on the [GitHub Discussions][https://github.com/grafana/oncall/discussions] by an existing team member. Ideally, the new member replies in this thread, acknowledging team membership. +- added to the projects with commit rights. +- added to the [team mailing list][team]. 
+ +### Offboarding + +The ex-member is + +- removed from the list of [team members](#team-members). Ideally by sending a PR of their own, at least approving said PR. In case of forced removal, no approval is needed. +- removed from the projects. Optionally, they can retain maintainership of one or more repositories if the [team](#team-members) agrees. +- removed from the team mailing list and demoted to a normal member of the other mailing lists. +- not allowed to call themselves an active team member any more, nor allowed to imply this to be the case. +- added to a list of previous members if they so choose. + +If needed, we reserve the right to publicly announce removal. diff --git a/LICENSING.md b/LICENSING.md index 4e53ac0d..34951583 100644 --- a/LICENSING.md +++ b/LICENSING.md @@ -9,9 +9,11 @@ The default license for this project is [AGPL-3.0-only](LICENSE). The following directories and their subdirectories are licensed under Apache-2.0: ``` +n/a ``` The following directories and their subdirectories are licensed under their original upstream licenses: ``` +n/a ``` diff --git a/MAINTAINERS.md b/MAINTAINERS.md new file mode 100644 index 00000000..bd9b78f3 --- /dev/null +++ b/MAINTAINERS.md @@ -0,0 +1,14 @@ +The following are the main/default maintainers: + +- Ildar Iskhakov — [@iskhakov](https://github.com/iskhakov) ([Grafana Labs](https://grafana.com/)) +- Matvey Kukuy — [@Matvey-Kuk](https://github.com/Matvey-Kuk) ([Grafana Labs](https://grafana.com/)) + +Some parts of the codebase have other maintainers, the package paths also include all sub-packages: + +n/a + +For the sake of brevity, not all subtrees are explicitly listed. Due to the +size of this repository, the natural changes in focus of maintainers over time, +and nuances of where particular features live, this list will always be +incomplete and out of date. However the listed maintainer(s) should be able to +direct a PR/question to the right person. 
diff --git a/README.md b/README.md index aa8300dc..6e94081a 100644 --- a/README.md +++ b/README.md @@ -1,76 +1,61 @@ -# Grafana OnCall Incident Response -Grafana OnCall, cloud version of Grafana OnCall: https://grafana.com/products/cloud/ + -Developer-friendly, incident response management with brilliant Slack integration. -- Connect monitoring systems -- Collect and analyze data -- On-call rotation -- Automatic escalation -- Never miss alerts with calls and SMS +Developer-friendly, incident response with brilliant Slack integration. -![Grafana OnCall Screenshot](screenshot.png) + + +- Collect and analyze alerts from multiple monitoring systems +- On-call rotations based on schedules +- Automatic escalations +- Phone calls, SMS, Slack, Telegram notifications ## Getting Started -OnCall consists of two parts: -1. OnCall backend -2. "Grafana OnCall" plugin you need to install in your Grafana -### How to run OnCall backend -1. An all-in-one image of OnCall is available on docker hub to run it: +We prepared multiple environments: [production](https://grafana.com/docs/grafana-cloud/oncall/open-source/#production-environment), [developer](DEVELOPER.md) and hobby: + +1. Download docker-compose.yaml: ```bash -docker run -it --name oncall-backend -p 8000:8000 grafana/oncall-all-in-one +curl https://github.com/grafana/oncall/blob/dev/docker-compose.yml -o docker-compose.yaml ``` -2. When the image starts up you will see a message like this: +2. Set variables: ```bash -👋 This script will issue an invite token to securely connect the frontend. -Maintainers will be happy to help in the slack channel #grafana-oncall: https://slack.grafana.com/ -Your invite token: , use it in the Grafana OnCall plugin. 
+echo "DOMAIN=http://localhost +SECRET_KEY=my_random_secret_must_be_more_than_32_characters_long +RABBITMQ_PASSWORD=rabbitmq_secret_pw +MYSQL_PASSWORD=mysql_secret_pw +COMPOSE_PROFILES=with_grafana # Remove this line if you want to use existing grafana +GRAFANA_USER=admin +GRAFANA_PASSWORD=admin" > .env_hobby ``` -3. If you started your container detached with -d check the log: +3. Launch services: ```bash -docker logs oncall-backend +docker-compose --env-file .env_hobby -f docker-compose.yml up --build -d ``` -### How to install "Grafana OnCall" Plugin and connect with a backend -1. Open Grafana in your browser and login as an Admin -2. Navigate to Configuration → Plugins -3. Type Grafana OnCall into the "Search Grafana plugins" field -4. Select the Grafana OnCall plugin and press the "Install" button -5. On the Grafana OnCall Plugin page Enable the plugin and go to the Configuration tab you should see a status field with the message -``` -OnCall has not been setup, configure & initialize below. -``` -6. Fill in configuration fields using the token you got from the backend earlier, then press "Install Configuration" -``` -OnCall API URL: (The URL & port used to access OnCall) -http://host.docker.internal:8000 - -OnCall Invitation Token (Single use token to connect Grafana instance): -Invitation token from docker startup - -Grafana URL (URL OnCall will use to talk to this Grafana instance): -http://localhost:3000 (or http://host.docker.internal:3000 if your grafana is running in Docker locally) +4. Issue one-time invite token: +```bash +docker-compose --env-file .env_hobby -f docker-compose.yml run engine python manage.py issue_invite_for_the_frontend --override ``` -## Getting Help -- `#grafana-oncall` channel at https://slack.grafana.com/ -- Grafana Labs community forum for OnCall: https://community.grafana.com -- File an [issue](https://github.com/grafana/oncall/issues) for bugs, issues and feature suggestions. +5. 
Go to [OnCall Plugin Configuration](http://localhost:3000/plugins/grafana-oncall-app) (or find OnCall plugin in configuration->plugins) and connect OnCall _plugin_ with OnCall _backend_: +``` +Invite token: ^^^ from the previous step. +OnCall backend URL: http://engine:8080 +Grafana Url: http://grafana:3000 +``` -## Production Setup +6. Enjoy! -Looking for the production instructions? We're going to release them soon. Please join our Slack channel to be the first to know about them. + +## Join community + + + + ## Further Reading - *Documentation* - [Grafana OnCall](https://grafana.com/docs/grafana-cloud/oncall/) - *Blog Post* - [Announcing Grafana OnCall, the easiest way to do on-call management](https://grafana.com/blog/2021/11/09/announcing-grafana-oncall/) - *Presentation* - [Deep dive into the Grafana, Prometheus, and Alertmanager stack for alerting and on-call management](https://grafana.com/go/observabilitycon/2021/alerting/?pg=blog) - -## FAQ - -- How do I generate a new invitation token to connect plugin with a backend? 
-```bash -docker exec oncall-backend python manage.py issue_invite_for_the_frontend --override -``` diff --git a/developer-docker-compose.yml b/docker-compose-developer.yml similarity index 83% rename from developer-docker-compose.yml rename to docker-compose-developer.yml index b24312d6..71280b77 100644 --- a/developer-docker-compose.yml +++ b/docker-compose-developer.yml @@ -12,7 +12,7 @@ services: ports: - 3306:3306 environment: - MYSQL_ROOT_PASSWORD: local_dev_pwd + MYSQL_ROOT_PASSWORD: empty MYSQL_DATABASE: oncall_local_dev healthcheck: test: [ "CMD", "mysqladmin" ,"ping", "-h", "localhost" ] @@ -42,13 +42,13 @@ services: mysql-to-create-grafana-db: image: mariadb:10.2 platform: linux/x86_64 - command: bash -c "mysql -h mysql -uroot -plocal_dev_pwd -e 'CREATE DATABASE IF NOT EXISTS grafana CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;'" + command: bash -c "mysql -h mysql -uroot -pempty -e 'CREATE DATABASE IF NOT EXISTS grafana CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;'" depends_on: mysql: condition: service_healthy grafana: - image: "grafana/grafana:8.3.2" + image: "grafana/grafana:9.0.0-beta3" restart: always mem_limit: 500m cpus: 0.5 @@ -56,7 +56,7 @@ services: GF_DATABASE_TYPE: mysql GF_DATABASE_HOST: mysql GF_DATABASE_USER: root - GF_DATABASE_PASSWORD: local_dev_pwd + GF_DATABASE_PASSWORD: empty GF_SECURITY_ADMIN_USER: oncall GF_SECURITY_ADMIN_PASSWORD: oncall GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: grafana-oncall-app diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..e3f5dca5 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,174 @@ +services: + engine: + # TODO: change to the public image once it's public + # image: ... 
+ build: engine + ports: + - 8080:8080 + command: > + sh -c "uwsgi --ini uwsgi.ini" + environment: + BASE_URL: $DOMAIN + SECRET_KEY: $SECRET_KEY + RABBITMQ_USERNAME: "rabbitmq" + RABBITMQ_PASSWORD: $RABBITMQ_PASSWORD + RABBITMQ_HOST: "rabbitmq" + RABBITMQ_PORT: "5672" + RABBITMQ_DEFAULT_VHOST: "/" + MYSQL_PASSWORD: $MYSQL_PASSWORD + MYSQL_DB_NAME: oncall_hobby + MYSQL_USER: ${MYSQL_USER:-root} + MYSQL_HOST: ${MYSQL_HOST:-mysql} + MYSQL_PORT: 3306 + REDIS_URI: redis://redis:6379/0 + DJANGO_SETTINGS_MODULE: settings.hobby + OSS: "True" + CELERY_WORKER_QUEUE: "default,critical,long,slack,telegram,webhook,retry,celery" + depends_on: + mysql: + condition: service_healthy + oncall_db_migration: + condition: service_completed_successfully + rabbitmq: + condition: service_started + redis: + condition: service_started + + celery: + # TODO: change to the public image once it's public + build: engine + command: sh -c "./celery_with_exporter.sh" + environment: + BASE_URL: $DOMAIN + SECRET_KEY: $SECRET_KEY + RABBITMQ_USERNAME: "rabbitmq" + RABBITMQ_PASSWORD: $RABBITMQ_PASSWORD + RABBITMQ_HOST: "rabbitmq" + RABBITMQ_PORT: "5672" + RABBITMQ_DEFAULT_VHOST: "/" + MYSQL_PASSWORD: $MYSQL_PASSWORD + MYSQL_DB_NAME: oncall_hobby + MYSQL_USER: ${MYSQL_USER:-root} + MYSQL_HOST: ${MYSQL_HOST:-mysql} + MYSQL_PORT: 3306 + REDIS_URI: redis://redis:6379/0 + DJANGO_SETTINGS_MODULE: settings.hobby + OSS: "True" + CELERY_WORKER_QUEUE: "default,critical,long,slack,telegram,webhook,retry,celery" + CELERY_WORKER_CONCURRENCY: "1" + CELERY_WORKER_MAX_TASKS_PER_CHILD: "100" + CELERY_WORKER_SHUTDOWN_INTERVAL: "65m" + CELERY_WORKER_BEAT_ENABLED: "True" + depends_on: + mysql: + condition: service_healthy + oncall_db_migration: + condition: service_completed_successfully + rabbitmq: + condition: service_started + redis: + condition: service_started + + oncall_db_migration: + build: engine + command: python manage.py migrate --noinput + environment: + BASE_URL: $DOMAIN + SECRET_KEY: $SECRET_KEY + 
RABBITMQ_USERNAME: "rabbitmq" + RABBITMQ_PASSWORD: $RABBITMQ_PASSWORD + RABBITMQ_HOST: "rabbitmq" + RABBITMQ_PORT: "5672" + RABBITMQ_DEFAULT_VHOST: "/" + MYSQL_PASSWORD: $MYSQL_PASSWORD + MYSQL_DB_NAME: oncall_hobby + MYSQL_USER: ${MYSQL_USER:-root} + MYSQL_HOST: ${MYSQL_HOST:-mysql} + MYSQL_PORT: 3306 + REDIS_URI: redis://redis:6379/0 + DJANGO_SETTINGS_MODULE: settings.hobby + OSS: "True" + CELERY_WORKER_QUEUE: "default,critical,long,slack,telegram,webhook,retry,celery" + depends_on: + mysql: + condition: service_healthy + + mysql: + image: mysql:5.7 + platform: linux/x86_64 + mem_limit: 500m + cpus: 0.5 + command: --default-authentication-plugin=mysql_native_password + restart: always + ports: + - 3306:3306 + volumes: + - dbdata:/var/lib/mysql + environment: + MYSQL_ROOT_PASSWORD: $MYSQL_PASSWORD + MYSQL_DATABASE: oncall_hobby + healthcheck: + test: "mysql -uroot -p$MYSQL_PASSWORD oncall_hobby -e 'select 1'" + timeout: 20s + retries: 10 + + redis: + image: redis + mem_limit: 100m + cpus: 0.1 + restart: always + ports: + - 6379:6379 + + rabbitmq: + image: "rabbitmq:3.7.15-management" + hostname: rabbitmq + mem_limit: 1000m + cpus: 0.5 + volumes: + - rabbitmqdata:/var/lib/rabbitmq + environment: + RABBITMQ_DEFAULT_USER: "rabbitmq" + RABBITMQ_DEFAULT_PASS: $RABBITMQ_PASSWORD + RABBITMQ_DEFAULT_VHOST: "/" + + mysql_to_create_grafana_db: + image: mysql:5.7 + platform: linux/x86_64 + command: bash -c "mysql -h ${MYSQL_HOST:-mysql} -uroot -p${MYSQL_PASSWORD:?err} -e 'CREATE DATABASE IF NOT EXISTS grafana CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;'" + depends_on: + mysql: + condition: service_healthy + profiles: + - with_grafana + + grafana: + image: "grafana/grafana:9.0.0-beta3" + mem_limit: 500m + ports: + - 3000:3000 + cpus: 0.5 + environment: + GF_DATABASE_TYPE: mysql + GF_DATABASE_HOST: ${MYSQL_HOST:-mysql} + GF_DATABASE_USER: ${MYSQL_USER:-root} + GF_DATABASE_PASSWORD: ${MYSQL_PASSWORD:?err} + GF_SECURITY_ADMIN_USER: ${GRAFANA_USER:-admin} + 
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:?err} + GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: grafana-oncall-app + GF_INSTALL_PLUGINS: grafana-oncall-app + volumes: + - ./grafana-plugin:/var/lib/grafana/plugins/grafana-plugin + depends_on: + mysql_to_create_grafana_db: + condition: service_completed_successfully + mysql: + condition: service_healthy + profiles: + - with_grafana + +volumes: + dbdata: + rabbitmqdata: + caddy_data: + caddy_config: diff --git a/docs/Makefile b/docs/Makefile index 5ddacacf..e66f1c1c 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,5 +1,5 @@ IMAGE = grafana/docs-base:latest -CONTENT_PATH = /hugo/content/docs/amixr/latest +CONTENT_PATH = /hugo/content/docs/oncall/latest PORT = 3002:3002 .PHONY: pull diff --git a/docs/README.md b/docs/README.md index 8d702ceb..b6a557c7 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,5 +4,5 @@ Source for documentation at https://grafana.com/docs/amixr/ ## Preview the website -Run `make docs`. This launches a preview of the website with the current grafana docs at `http://localhost:3002/docs/amixr/` which will refresh automatically when changes are made to content in the `sources` directory. +Run `make docs`. This launches a preview of the website with the current grafana docs at `http://localhost:3002/docs/oncall/latest/` which will refresh automatically when changes are made to content in the `sources` directory. Make sure Docker is running. 
diff --git a/docs/img/GH_discussions.png b/docs/img/GH_discussions.png new file mode 100644 index 00000000..d3a1798a Binary files /dev/null and b/docs/img/GH_discussions.png differ diff --git a/docs/img/community_call.png b/docs/img/community_call.png new file mode 100644 index 00000000..22692fad Binary files /dev/null and b/docs/img/community_call.png differ diff --git a/docs/img/logo.png b/docs/img/logo.png new file mode 100644 index 00000000..ec11fc3a Binary files /dev/null and b/docs/img/logo.png differ diff --git a/docs/img/slack.png b/docs/img/slack.png new file mode 100644 index 00000000..d5ec5e6d Binary files /dev/null and b/docs/img/slack.png differ diff --git a/docs/sources/_index.md b/docs/sources/_index.md index ec446b6f..e2b19d95 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -1,9 +1,18 @@ -+++ -title = "Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "OnCall", "irm"] -weight = 1000 -aliases = ["/docs/grafana-cloud/oncall/"] -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/ + - /docs/oncall/latest/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - amixr + - OnCall + - irm +title: Grafana OnCall +weight: 1000 +--- # Grafana OnCall @@ -13,4 +22,4 @@ When you integrate an alert monitoring system with Grafana OnCall, the alerts wi Follow these links to learn more: -{{< section >}} \ No newline at end of file +{{< section >}} diff --git a/docs/sources/calendar-schedules/_index.md b/docs/sources/calendar-schedules/_index.md index 3b322720..7dff67a0 100644 --- a/docs/sources/calendar-schedules/_index.md +++ b/docs/sources/calendar-schedules/_index.md @@ -1,10 +1,15 @@ -+++ -title = "Configure and manage on-call schedules" -description = "" -keywords = ["Grafana", "oncall", "on-call", "calendar"] -aliases = [] -weight = 900 -+++ +--- +aliases: + - /docs/oncall/latest/calendar-schedules/ +description: "" +keywords: + - Grafana + - oncall + - on-call + - calendar +title: 
Configure and manage on-call schedules +weight: 900 +--- # Configure and manage on-call schedules @@ -20,26 +25,26 @@ When you create a schedule, you will be able to select a Slack channel, associat ## Create an on-call schedule calendar -Create a primary calendar and an optional override calendar to schedule on-call shifts for team members. +Create a primary calendar and an optional override calendar to schedule on-call shifts for team members. 1. In the **Scheduling** section of Grafana OnCall, click **+ Create schedule**. -1. Give the schedule a name. +1. Give the schedule a name. 1. Create a new calendar in your calendar service and locate the secret iCal URL. For example, in a Google calendar, this URL can be found in **Settings > Settings for my calendars > Integrate calendar**. -1. Copy the secret iCal URL. In OnCall, paste it into the **Primary schedule for iCal URL** field. - The permissions you set when you create the calendar determine who can modify the calendar. +1. Copy the secret iCal URL. In OnCall, paste it into the **Primary schedule for iCal URL** field. + The permissions you set when you create the calendar determine who can modify the calendar. 1. Click **Create Schedule**. 1. Schedule on-call times for team members. - Use the Grafana username of team members as the event name to schedule their on-call times. You can take advantage of all of the features of your calendar service. + Use the Grafana username of team members as the event name to schedule their on-call times. You can take advantage of all of the features of your calendar service. -1. Create overlapping schedules (optional). +1. Create overlapping schedules (optional). - When you create schedules that overlap, you can prioritize a schedule by adding a level marker. 
For example, if users AliceGrafana and BobGrafana have overlapping schedules, but BobGrafana is the primary contact, you would name his event `[L1] BobGrafana`, AliceGrafana maintains the default `[L0]` status, and would not receive notifications during the overlapping time. You can prioritize up to and including a level 9 prioritization, or `[L9]`. + When you create schedules that overlap, you can prioritize a schedule by adding a level marker. For example, if users AliceGrafana and BobGrafana have overlapping schedules, but BobGrafana is the primary contact, you would name his event `[L1] BobGrafana`, AliceGrafana maintains the default `[L0]` status, and would not receive notifications during the overlapping time. You can prioritize up to and including a level 9 prioritization, or `[L9]`. # Create an override calendar (optional) @@ -47,10 +52,10 @@ You can use an override calendar to allow team members to schedule on-call dutie 1. Create a new calendar using the same calendar service you used to create the primary calendar. - Be sure to set permissions that allow team members to edit the calendar. + Be sure to set permissions that allow team members to edit the calendar. -1. In the scheduling section of Grafana OnCall, select the primary calendar you want to override. +1. In the scheduling section of Grafana OnCall, select the primary calendar you want to override. -1. Click **Edit**. +1. Click **Edit**. -1. Enter the secret iCal URL in the **Overrides schedule iCal URL** field and click **Update**. \ No newline at end of file +1. Enter the secret iCal URL in the **Overrides schedule iCal URL** field and click **Update**. 
diff --git a/docs/sources/chat-options/_index.md b/docs/sources/chat-options/_index.md index 9a4ad687..324356d3 100644 --- a/docs/sources/chat-options/_index.md +++ b/docs/sources/chat-options/_index.md @@ -1,13 +1,22 @@ -+++ -title = "Connect ChatOps to Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "slack"] -weight = 700 -+++ +--- +aliases: + - /docs/oncall/latest/chat-options/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - amixr + - oncall + - slack +title: Connect ChatOps to Grafana OnCall +weight: 700 +--- -# Connect ChatOps to Grafana OnCall +# Connect ChatOps to Grafana OnCall Grafana OnCall directly supports the export of alert notifications to some popular messaging applications like Slack and Telegram. You can use outgoing webhooks to applications that aren't directly supported. For information on configuring outgoing webhooks, see [Send alert group notifications by webhook]({{< relref "../integrations/webhooks/configure-outgoing-webhooks.md" >}}). 
To configure supported messaging apps, see the following topics: -{{< section >}} \ No newline at end of file +{{< section >}} diff --git a/docs/sources/chat-options/configure-slack.md b/docs/sources/chat-options/configure-slack.md index 0d7e9002..5b330838 100644 --- a/docs/sources/chat-options/configure-slack.md +++ b/docs/sources/chat-options/configure-slack.md @@ -1,20 +1,30 @@ -+++ -title = "Connect Slack to Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "slack"] -weight = 100 -+++ +--- +aliases: + - /docs/oncall/latest/chat-options/configure-slack/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - amixr + - oncall + - slack +title: Connect Slack to Grafana OnCall +weight: 100 +--- # Connect Slack to Grafana OnCall -Grafana OnCall integrates closely with your Slack workspace to deliver alert group notifications to individuals, groups, and team members. + +Grafana OnCall integrates closely with your Slack workspace to deliver alert group notifications to individuals, groups, and team members. ## Connect to Slack Connect your organization's Slack workspace to your Grafana OnCall instance. ->**NOTE:** Only Grafana users with the administrator role can configure OnCall settings. +> **NOTE:** Only Grafana users with the administrator role can configure OnCall settings. 1. In OnCall, click on the **ChatOps** tab and select Slack in the side menu. -1. Click **Install Slack integration**. +1. Click **Install Slack integration**. 1. Read the notice and click the button to proceed to the Slack website. 1. Sign in to your organization's workspace. 1. Click **Allow** to allow OnCall to access Slack. @@ -22,12 +32,13 @@ Connect your organization's Slack workspace to your Grafana OnCall instance. 
## Configure Slack in OnCall -In the Slack settings for Grafana OnCall, administrators can set a default Slack channel for notifications and opt to set reminders for acknowledged alerts that can timeout and revert an alert group to the unacknowledged state. +In the Slack settings for Grafana OnCall, administrators can set a default Slack channel for notifications and opt to set reminders for acknowledged alerts that can timeout and revert an alert group to the unacknowledged state. 1. In OnCall, click on the **ChatOps** tab and select Slack in the side menu. 1. In the first dropdown menu, select a default Slack channel. - When you set up escalation policies to notify Slack channels of incoming alerts, the default will be the one you set here. You will still have the option to select from all the channels available in your organization. + When you set up escalation policies to notify Slack channels of incoming alerts, the default will be the one you set here. You will still have the option to select from all the channels available in your organization. 1. In **Additional settings** you can choose how to remind users of acknowledged but unresolved alert groups. You can also select whether and or when to automatically revoke the "acknowledged" status from an alert group to an unacknowledged state. ## Slack settings for on-call calendar scheduling notifications -Admins can configure settings in Slack to notify people and groups about on-call schedules. When an on-call shift notification is sent to a person or channel, click the gear button to access **Notification preferences**. Use the options to configure the behavior of future shift notifications. \ No newline at end of file + +Admins can configure settings in Slack to notify people and groups about on-call schedules. When an on-call shift notification is sent to a person or channel, click the gear button to access **Notification preferences**. Use the options to configure the behavior of future shift notifications. 
diff --git a/docs/sources/chat-options/configure-telegram.md b/docs/sources/chat-options/configure-telegram.md index b56104a6..ec6128eb 100644 --- a/docs/sources/chat-options/configure-telegram.md +++ b/docs/sources/chat-options/configure-telegram.md @@ -1,12 +1,21 @@ -+++ -title = "Connect Telegram to Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "telegram"] -weight = 300 -+++ +--- +aliases: + - /docs/oncall/latest/chat-options/configure-telegram/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - amixr + - oncall + - telegram +title: Connect Telegram to Grafana OnCall +weight: 300 +--- # Connect Telegram to Grafana OnCall -You can use Telegram to deliver alert group notifications to a dedicated channel, and allow users to perform notification actions. +You can use Telegram to deliver alert group notifications to a dedicated channel, and allow users to perform notification actions. Each alert group notification is assigned a dedicated discussion. Users can perform notification actions (acknowledge, resolve, silence), create reports, and discuss alerts in the comments section of the discussions. @@ -14,19 +23,19 @@ Each alert group notification is assigned a dedicated discussion. Users can perf Connect your organization's Telegram account to your Grafana OnCall instance by following the instructions provided in OnCall. You can use the following steps as a reference. ->**NOTE:** Only Grafana users with the administrator role can configure OnCall settings. +> **NOTE:** Only Grafana users with the administrator role can configure OnCall settings. 1. In OnCall, click on the **ChatOps** tab and select Telegram in the side menu. 1. Click **Connect Telegram channel** and follow the instructions, mirrored here for reference. A unique verification code will be generated that you must use to activate the channel. -1. In your team Telegram account, create a new channel, and set it to **Private**. 
+1. In your team Telegram account, create a new channel, and set it to **Private**. 1. In **Manage Channel**, make sure **Sign messages** is enabled. 1. Create a new discussion group. - This group handles alert actions and comments. -1. Add the discussion group to the channel. - In **Manage Channel**, click **Discussion** to find and add the new group. -1. In OnCall, click the link to the OnCall bot to add it to your contacts. -1. In Telegram, add the bot to your channel as an Admin. Allow it to **Post Messages**. -1. Add the bot to the discussion group. + This group handles alert actions and comments. +1. Add the discussion group to the channel. + In **Manage Channel**, click **Discussion** to find and add the new group. +1. In OnCall, click the link to the OnCall bot to add it to your contacts. +1. In Telegram, add the bot to your channel as an Admin. Allow it to **Post Messages**. +1. Add the bot to the discussion group. 1. In OnCall, send the provided verification code to the channel. 1. Make sure users connect to Telegram in their OnCall user profile. @@ -36,4 +45,4 @@ Connect your organization's Telegram account to your Grafana OnCall instance by 1. Click **Connect automatically** for the bot to message you and to bring up your telegram account. 1. Click **Start** when the OnCall bot messages you. -If you want to connect manually, you can click the URL provided and then **SEND MESSAGE**. In your Telegram account, click **Start**. \ No newline at end of file +If you want to connect manually, you can click the URL provided and then **SEND MESSAGE**. In your Telegram account, click **Start**. 
diff --git a/docs/sources/configure-user-settings.md b/docs/sources/configure-user-settings.md index 76df57c8..7654c795 100644 --- a/docs/sources/configure-user-settings.md +++ b/docs/sources/configure-user-settings.md @@ -1,38 +1,47 @@ -+++ -title = "Manage users and teams for Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "integrations"] -weight = 1100 -+++ +--- +aliases: + - /docs/oncall/latest/configure-user-settings/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - amixr + - oncall + - integrations +title: Manage users and teams for Grafana OnCall +weight: 1100 +--- # Manage users and teams for Grafana OnCall -Grafana OnCall is configured based on the teams you've created on the organization level of your Grafana instance, in **Configuration > Teams**. Administrators can create a different configuration for each team, and can navigate between team configurations in the **Select Team** dropdown menu in the **Incidents** section of Grafana OnCall. +Grafana OnCall is configured based on the teams you've created on the organization level of your Grafana instance, in **Configuration > Teams**. Administrators can create a different configuration for each team, and can navigate between team configurations in the **Select Team** dropdown menu in the **Incidents** section of Grafana OnCall. Users can edit their contact information, but user permissions are assigned at the Cloud portal level. ## Configure user notification policies -Administrators can configure how each user will receive notifications when they are are scheduled to receive them in escalation chains. Users can verify phone numbers and email addresses. +Administrators can configure how each user will receive notifications when they are are scheduled to receive them in escalation chains. Users can verify phone numbers and email addresses. ->**NOTE**: You cannot add users or manage permissions in Grafana OnCall. 
Most user settings are found on the organizational level of your Grafana instance in **Configuration > Users**. +> **NOTE**: You cannot add users or manage permissions in Grafana OnCall. Most user settings are found on the organizational level of your Grafana instance in **Configuration > Users**. 1. Find users. - - Select the **Users** tab and use the browser to search for a user in the team associated with the OnCall configuration. + + Select the **Users** tab and use the browser to search for a user in the team associated with the OnCall configuration. 1. Configure user settings. - Add and verify a phone number, a Slack username, and a Telegram account if you want to receive notifications using these mediums. + Add and verify a phone number, a Slack username, and a Telegram account if you want to receive notifications using these mediums. - >**NOTE:** To edit a user's profile username, email, or role, you must do so in the **Users** tab in the **Configuration** menu of your Grafana instance. + > **NOTE:** To edit a user's profile username, email, or role, you must do so in the **Users** tab in the **Configuration** menu of your Grafana instance. 1. Configure notification settings. - - Specify the notification medium and frequency for each user. Notification steps will be followed in the order they are listed. - - The settings you specify in **Default Notifications** dictate how a user is notified for most escalation thresholds. - - **Important Notifications** are labeled in escalation chains. If an escalation event is marked as an important notification, it will bypass **Default Notification** settings and notify the user by the method specified. + + Specify the notification medium and frequency for each user. Notification steps will be followed in the order they are listed. + + The settings you specify in **Default Notifications** dictate how a user is notified for most escalation thresholds. + + **Important Notifications** are labeled in escalation chains. 
If an escalation event is marked as an important notification, it will bypass **Default Notification** settings and notify the user by the method specified. ## Configure Telegram user settings in OnCall @@ -45,4 +54,4 @@ If you want to connect manually, you can click the URL provided and then **SEND ## Configure Slack user settings in OnCall 1. In your profile, find the Slack setting and click **Connect**. -1. Follow the instructions to verify your account. \ No newline at end of file +1. Follow the instructions to verify your account. diff --git a/docs/sources/escalation-policies/_index.md b/docs/sources/escalation-policies/_index.md index 02605174..def9d900 100644 --- a/docs/sources/escalation-policies/_index.md +++ b/docs/sources/escalation-policies/_index.md @@ -4,6 +4,7 @@ keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "onc weight = 500 +++ + # Configure Escalation Chains and Routes for Grafana OnCall Escalation Chains and Routes for Grafana OnCall @@ -12,4 +13,4 @@ Administrators can create escalation policies to automatically send alert group See the following topics for more information: -{{< section >}} \ No newline at end of file +{{< section >}} diff --git a/docs/sources/escalation-policies/configure-escalation-chains.md b/docs/sources/escalation-policies/configure-escalation-chains.md index a0b058f3..9430dd24 100644 --- a/docs/sources/escalation-policies/configure-escalation-chains.md +++ b/docs/sources/escalation-policies/configure-escalation-chains.md @@ -1,22 +1,34 @@ -+++ -title = "Configure and manage Escalation Chains" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "integrations"] -weight = 100 -+++ +--- +aliases: + - /docs/oncall/latest/escalation-policies/configure-escalation-chains/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - amixr + - oncall + - integrations +title: Configure and manage Escalation Chains +weight: 100 +--- # Configure and manage 
Escalation Chains -Escalation policies dictate how users and groups are notified when an alert notification is created. They can be very simple, or very complex. You can define as many escalation configurations for an integration as you need, and you can send notifications for certain alerts to a designated place when certain conditions are met, or not met. +Escalation policies dictate how users and groups are notified when an alert notification is created. They can be very simple, or very complex. You can define as many escalation configurations for an integration as you need, and you can send notifications for certain alerts to a designated place when certain conditions are met, or not met. Escalation policies have three main parts: -* User settings, where a user sets up their preferred or required notification method. -* An **escalation chain**, which can have one or more steps that are followed in order when a notification is triggered. -* A **route**, that allows administrators to manage notifications by flagging expressions in an alert payload. + +- User settings, where a user sets up their preferred or required notification method. +- An **escalation chain**, which can have one or more steps that are followed in order when a notification is triggered. +- A **route**, that allows administrators to manage notifications by flagging expressions in an alert payload. ## Escalation chains + An escalation chain can have many steps, or only one step. For example, steps can be configured to notify multiple users in some order, notify users that are scheduled for on-call shifts, ping groups in Slack, use outgoing webhooks to integrate with other services, such as JIRA, and do a number of other automated notification tasks. ## Routes + An escalation workflow can employ **routes** that administrators can configure to filter alerts by regular expressions in their payloads. 
Notifications for these alerts can be sent to individuals, or they can make use of a new or existing escalation chain. -To learn how to configure escalation chains and routes, see [Configure escalation policies]({{< relref "configure-escalation-policies">}}). \ No newline at end of file +To learn how to configure escalation chains and routes, see [Configure escalation policies]({{< relref "configure-escalation-policies">}}). diff --git a/docs/sources/escalation-policies/configure-routes.md b/docs/sources/escalation-policies/configure-routes.md index 855363fb..18666e4c 100644 --- a/docs/sources/escalation-policies/configure-routes.md +++ b/docs/sources/escalation-policies/configure-routes.md @@ -1,42 +1,53 @@ -+++ -title = "Configure and manage Routes" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "integrations"] -weight = 300 -+++ +--- +aliases: + - /docs/oncall/latest/escalation-policies/configure-routes/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - amixr + - oncall + - integrations +title: Configure and manage Routes +weight: 300 +--- # Configure and manage Routes -Set up escalation chains and routes to configure escalation behavior for alert group notifications. + +Set up escalation chains and routes to configure escalation behavior for alert group notifications. ## Configure escalation chains -You can create and edit escalation chains in two places: within **Integrations**, by clicking on an integration tile, and in **Escalation Chains**. The following steps are for the **Integrations** workflow, but are generally applicable in both situations. -You can use **escalation chains** and **routes** to determine ordered escalation procedures. Escalation chains allow you to set up a series of alert group notification actions that trigger if certain conditions that you specify are met or not met. 
+You can create and edit escalation chains in two places: within **Integrations**, by clicking on an integration tile, and in **Escalation Chains**. The following steps are for the **Integrations** workflow, but are generally applicable in both situations. + +You can use **escalation chains** and **routes** to determine ordered escalation procedures. Escalation chains allow you to set up a series of alert group notification actions that trigger if certain conditions that you specify are met or not met. 1. Click on the integration tile for which you want to define escalation policies. - - The **Escalations** section for the notification is in the pane to the right of the list of notifications. - You can click **Change alert template and grouping** to customize the look of the alert. You can also do this by clicking the **Settings** (gear) icon in the integration tile. + + The **Escalations** section for the notification is in the pane to the right of the list of notifications. + You can click **Change alert template and grouping** to customize the look of the alert. You can also do this by clicking the **Settings** (gear) icon in the integration tile. 1. Create an escalation chain. - - In the escalation pane, click **Escalate to** to choose from previously added escalation chains, or create a new one by clicking **Make a copy** or **Create a new chain**. This will be the name of the escalation policy you define. + + In the escalation pane, click **Escalate to** to choose from previously added escalation chains, or create a new one by clicking **Make a copy** or **Create a new chain**. This will be the name of the escalation policy you define. 1. Add escalation steps. - Click **Add escalation step** to choose from a set of actions and specify their triggering conditions. By default, the first step is to notify a slack channel or user. Specify users or channels or toggle the switch to turn this step off. 
+ Click **Add escalation step** to choose from a set of actions and specify their triggering conditions. By default, the first step is to notify a slack channel or user. Specify users or channels or toggle the switch to turn this step off. - To mark an escalation as **Important**, select the option from the step **Start** dropdown menu. User notification policies can be separately defined for **Important** and **Default** escalations. + To mark an escalation as **Important**, select the option from the step **Start** dropdown menu. User notification policies can be separately defined for **Important** and **Default** escalations. ## Create a route -To add a route, click **Add Route**. - -You can set up a single route and specify notification escalation steps, or you can add multiple routes, each with its own configuration. +To add a route, click **Add Route**. -Each route added to an escalation policy follows an `IF`, `ELSE IF`, or `ELSE` path and depends on the type of alert you specify using a regular expression that matches content in the payload body of the alert. You can also specify where to send the notification for each route. - -For example, you can send notifications for alerts with `\"severity\": \"critical\"` in the payload to an escalation chain called `Bob_OnCall`. You can create a different route for alerts with the payload `\"namespace\" *: *\"synthetic-monitoring-dev-.*\"` and select a escalation chain called `NotifySecurity`. +You can set up a single route and specify notification escalation steps, or you can add multiple routes, each with its own configuration. + +Each route added to an escalation policy follows an `IF`, `ELSE IF`, or `ELSE` path and depends on the type of alert you specify using a regular expression that matches content in the payload body of the alert. You can also specify where to send the notification for each route. 
+ +For example, you can send notifications for alerts with `\"severity\": \"critical\"` in the payload to an escalation chain called `Bob_OnCall`. You can create a different route for alerts with the payload `\"namespace\" *: *\"synthetic-monitoring-dev-.*\"` and select an escalation chain called `NotifySecurity`. You can set up escalation steps for each route in a chain. ->**NOTE:** When you modify an escalation chain or a route, it will modify that escalation chain across all integrations that use it. \ No newline at end of file +> **NOTE:** When you modify an escalation chain or a route, it will modify that escalation chain across all integrations that use it. diff --git a/docs/sources/getting-started.md b/docs/sources/getting-started.md index d3a5c9e6..1e762b2b 100644 --- a/docs/sources/getting-started.md +++ b/docs/sources/getting-started.md @@ -5,6 +5,7 @@ aliases = ["/docs/grafana-cloud/oncall/getting-started"] weight = 100 +++ + # Get started with Grafana OnCall Grafana OnCall is an incident response tool built to help DevOps and SRE teams improve their collaboration and resolve incidents faster. @@ -18,18 +19,16 @@ These procedures introduce you to initial Grafana OnCall configuration steps, in Grafana OnCall is available for Grafana Cloud as well as Grafana open source users. You must have a Grafana Cloud account or [Open Source Grafana OnCall]({{< relref " open-source.md" >}}) - For more information, see [Grafana Pricing](https://grafana.com/pricing/) for details. +For more information, see [Grafana Pricing](https://grafana.com/pricing/) for details. -## Install Open Source Grafana OnCall (OSS Only) - +## Install Open Source Grafana OnCall For Open Source Grafana OnCall installation guidance, refer to [Open Source Grafana OnCall]({{< relref " open-source.md" >}}) >**Note:** If you are using Grafana OnCall with your Grafana Cloud instance there are no install steps. 
Access Grafana OnCall from your Grafana Cloud account and skip ahead to “Get alerts into Grafana OnCall” - ## Get alerts into Grafana OnCall Once you’ve installed Grafana OnCall or accessed it from your Grafana Cloud instance, you can begin integrating with monitoring systems, configuring escalation chains, and get alerts into Grafana OnCall. @@ -44,6 +43,7 @@ Regardless of where your alerts originate, you can send them to Grafana OnCall v 3. Follow the configuration steps on the integration settings page. 4. Complete any necessary configurations in your monitoring system to send alerts to Grafana OnCall. + #### Send a demo alert 1. In the integration tab, click **Send demo alert** then navigate to the **Alert Groups** tab to see your test alert firing. @@ -112,3 +112,4 @@ To integrate your on-call calendar with Grafana OnCall: For more information on on-call schedules, refer to [Configure and manage on-call schedules]({{< relref " ../calendar-schedules.md/" >}}) + diff --git a/docs/sources/integrations/_index.md b/docs/sources/integrations/_index.md index ba58b183..cfbed723 100644 --- a/docs/sources/integrations/_index.md +++ b/docs/sources/integrations/_index.md @@ -1,21 +1,29 @@ -+++ -title = "Connect to Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "oncall", "integrations"] -weight = 300 -aliases = ["/docs/grafana-cloud/oncall/integrations/"] -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/integrations/ + - /docs/oncall/latest/integrations/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - amixr + - oncall + - integrations +title: Connect to Grafana OnCall +weight: 300 +--- # Connect to Grafana OnCall -Integrations allow you to connect monitoring systems of your choice to send alerts to Grafana OnCall. Regardless of where your alerts originate, you can configure alerts to be sent to Grafana OnCall for alert escalation and notification. 
Grafana OnCall receives alerts in JSON format via a POST request, OnCall then parses alert data using preconfigured alert templates to determine alert grouping, apply routes, and determine correct escalation. +Integrations allow you to connect monitoring systems of your choice to send alerts to Grafana OnCall. Regardless of where your alerts originate, you can configure alerts to be sent to Grafana OnCall for alert escalation and notification. Grafana OnCall receives alerts in JSON format via a POST request, OnCall then parses alert data using preconfigured alert templates to determine alert grouping, apply routes, and determine correct escalation. -There are many integrations that are directly supported by Grafana OnCall. Those that aren’t currently listed in the Integrations menu can be connected using the webhook integration and configured alert templates. +There are many integrations that are directly supported by Grafana OnCall. Those that aren’t currently listed in the Integrations menu can be connected using the webhook integration and configured alert templates. ## Configure and manage integrations You can configure and manage your integrations from the **Integrations** tab in Grafana OnCall. The following sections describe how to configure and customize your integrations to ensure alerts are treated appropriately. - ### Connect an integration to Grafana OnCall To configure an integration for Grafana OnCall: @@ -25,10 +33,9 @@ To configure an integration for Grafana OnCall: 3. Follow the configuration steps on the integration settings page. 4. Complete any necessary configurations in your tool to send alerts to Grafana OnCall. - ### Manage Grafana OnCall integrations -To manage existing integrations, navigate to the **Integrations** tab in Grafana OnCall and select the integration you want to manage. +To manage existing integrations, navigate to the **Integrations** tab in Grafana OnCall and select the integration you want to manage. 
#### Customize alert templates and grouping @@ -37,17 +44,16 @@ To customize the alert template for an integration: 1. Select an integration from your list of enabled integrations in the **Integrations** tab. 2. Click **Change alert template and grouping**. 3. Select a template to edit from the **Edit template for** dropdown menu. -4. Edit alert templates as needed to customize the fields and content rendered for an alert. +4. Edit alert templates as needed to customize the fields and content rendered for an alert. To customize alert grouping for an integration: 1. Click **Change alert template and grouping**. 2. Select **Alert Behavior** from the dropdown menu next to **Edit template for**. -3. Edit the **grouping id**, **acknowledge condition**, and **resolve condition** templates as needed to customize your alert behavior. +3. Edit the **grouping id**, **acknowledge condition**, and **resolve condition** templates as needed to customize your alert behavior. For more information on alert templates, see [Configure alerts in Grafana OnCall]({{< relref " ../create-custom-templates/" >}}) - #### Add Routes To add a route to an integration using regular expression: @@ -56,25 +62,24 @@ To add a route to an integration using regular expression: 2. Click **+ Add Route**. 3. Use python style regex to match on your alert content. 4. Click **Create Route**. -5. Select an escalation chain for “**IF** alert payload matches regex” and “**ELSE**” to specify where to route each type of alert. +5. Select an escalation chain for “**IF** alert payload matches regex” and “**ELSE**” to specify where to route each type of alert. To learn more about routes, refer to [Configure and manage Routes]({{< relref " ../configure-routes/" >}}) - #### Edit integration name + To edit the name of an integration: -1. Navigate to the **Integrations** tab, select an integration from the list of enabled integrations. +1. 
Navigate to the **Integrations** tab, select an integration from the list of enabled integrations. 2. Click the **pencil icon** next to the integration name. 3. Provide a new name and click **Update**. -#### Delete integration +#### Delete integration + To delete an integration: 1. Select an integration from your list of enabled integrations in the **Integrations** tab. 2. Click the **trash can** icon next to the selected integration. 3. Confirm by clicking **Delete**. - - {{< section >}} diff --git a/docs/sources/integrations/add-webhook-integration.md b/docs/sources/integrations/add-webhook-integration.md index ebde152a..e0579381 100644 --- a/docs/sources/integrations/add-webhook-integration.md +++ b/docs/sources/integrations/add-webhook-integration.md @@ -1,36 +1,45 @@ -+++ -title = "Webhook integration for Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Alertmanager", "Prometheus"] -weight = 700 -+++ +--- +aliases: + - /docs/oncall/latest/integrations/add-webhook-integration/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - Alertmanager + - Prometheus +title: Webhook integration for Grafana OnCall +weight: 700 +--- # Configure Webhook integrations for Grafana OnCall -Grafana OnCall directly supports many integrations, those that aren’t currently listed in the Integrations menu can be connected using the webhook integration and configured alert templates. +Grafana OnCall directly supports many integrations, those that aren’t currently listed in the Integrations menu can be connected using the webhook integration and configured alert templates. -With the webhook integration, you can connect to any alert source that isn't listed in the **Create Integration** page. +With the webhook integration, you can connect to any alert source that isn't listed in the **Create Integration** page. There are two available formats, **Webhook** and **Formatted Webhook**. 
-* **Webhook** will pull all of the raw JSON payload and display it in the manner that it's received. -* **Formatted Webhook** can be used if the alert payload sent by your monitoring service is formatted in a way that OnCall recognizes. - - The following fields are recognized, but none are required: - * `alert_uid`: a unique alert ID for grouping. - * `title`: a title. - * `image_url`: a URL for an image attached to alert. - * `state`: either `ok` or `alerting`. Helpful for auto-resolving. - * `link_to_upstream_details`: link back to your monitoring system. - * `message`: alert details. +- **Webhook** will pull all of the raw JSON payload and display it in the manner that it's received. +- **Formatted Webhook** can be used if the alert payload sent by your monitoring service is formatted in a way that OnCall recognizes. + + The following fields are recognized, but none are required: + + - `alert_uid`: a unique alert ID for grouping. + - `title`: a title. + - `image_url`: a URL for an image attached to alert. + - `state`: either `ok` or `alerting`. Helpful for auto-resolving. + - `link_to_upstream_details`: link back to your monitoring system. + - `message`: alert details. To configure a webhook integration: 1. In the **Integrations** tab, click **+ New integration for receiving alerts**. -2. Select either **Webhook** or **Formatted Webhook** integration. +2. Select either **Webhook** or **Formatted Webhook** integration. 3. Follow the configuration steps in the **How to connect** section of the integration settings. 4. Use the unique webhook URL to complete any configuration in your monitoring service to send POST requests. Use any http client, e.g. curl to send POST requests with any payload. - - For example: + +For example: ```json curl -X POST \ @@ -46,5 +55,4 @@ To configure a webhook integration: }' ``` - -To learn how to use custom alert templates for formatted webhooks, see [Configure alerts in Grafana OnCall]({{< relref "../create-custom-templates/" >}}). 
\ No newline at end of file +To learn how to use custom alert templates for formatted webhooks, see [Configure alerts in Grafana OnCall]({{< relref "../create-custom-templates/" >}}). diff --git a/docs/sources/integrations/available-integrations /_index.md b/docs/sources/integrations/available-integrations /_index.md index 6898f87a..6e30a415 100644 --- a/docs/sources/integrations/available-integrations /_index.md +++ b/docs/sources/integrations/available-integrations /_index.md @@ -1,21 +1,26 @@ -+++ -title = "Currently available integrations for Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Alertmanager", "Prometheus"] -aliases = ["/docs/grafana-cloud/oncall/integrations/add-integration/"] -weight = 100 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/integrations/add-integration/ + - /docs/oncall/latest/integrations/available-integrations / +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - Alertmanager + - Prometheus +title: Currently available integrations for Grafana OnCall +weight: 100 +--- # Currently available integrations Grafana OnCall can connect directly to the monitoring services where your alerts originate. All currently available integrations are listed in the Grafana OnCall **Create Integration** section. -If the integration you're looking for isn't currently listed, see [Configure Webhook integrations for Grafana OnCall]({{< relref " ../add-webhook-integration/" >}}) to integration your monitoring system with Grafana OnCall. - ->**Note:** Some integrations are available for Grafana Cloud instances only. See individual integration guides for more information. +If the integration you're looking for isn't currently listed, see [Configure Webhook integrations for Grafana OnCall]({{< relref " ../add-webhook-integration/" >}}) to integrate your monitoring system with Grafana OnCall. +> **Note:** Some integrations are available for Grafana Cloud instances only. 
See individual integration guides for more information. The following integrations are currently available for Grafana OnCall and have documentation: - - {{< section >}} diff --git a/docs/sources/integrations/available-integrations /add-alertmanager.md b/docs/sources/integrations/available-integrations /add-alertmanager.md index 2fd195fb..f5604f4e 100644 --- a/docs/sources/integrations/available-integrations /add-alertmanager.md +++ b/docs/sources/integrations/available-integrations /add-alertmanager.md @@ -1,13 +1,21 @@ -+++ -title = "Connect Alert Manager to Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Alertmanager", "Prometheus"] -aliases = ["/docs/grafana-cloud/oncall/available-integrations/add-alertmanager/"] -weight = 300 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/available-integrations/add-alertmanager/ + - /docs/oncall/latest/integrations/available-integrations /add-alertmanager/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - Alertmanager + - Prometheus +title: Connect Alert Manager to Grafana OnCall +weight: 300 +--- # Connect AlertManager to Grafana OnCall -The AlertManager integration for Grafana OnCall handles alerts sent by client applications such as the Prometheus server. +The AlertManager integration for Grafana OnCall handles alerts sent by client applications such as the Prometheus server. Grafana OnCall provides grouping abilities when processing alerts from Alert Manager, including initial deduplicating, grouping, and routing the alerts to Grafana OnCall. @@ -26,6 +34,7 @@ You must have an Admin role to create integrations in Grafana OnCall. ## Configure AlertManager Update the `receivers` section of your Alertmanager configuration to use a unique integration URL: + ``` route: receiver: 'oncall' @@ -47,12 +56,12 @@ AlertManager offers three alert grouping options: - `group_by` provides two options, `instance` or `job`. 
- `group_wait` sets the length of time to initially wait before sending a notification for a particular group of alerts. For example, `group_wait` can be set to 45s. - Setting a high value for `group_wait` reduces alert noise and minimizes interruption, but it may introduce delays in receiving alert notifications. To set an appropriate wait time, consider whether the group of alerts will be the same as those previously sent. + Setting a high value for `group_wait` reduces alert noise and minimizes interruption, but it may introduce delays in receiving alert notifications. To set an appropriate wait time, consider whether the group of alerts will be the same as those previously sent. - `group_interval` sets the length of time to wait before sending notifications about new alerts that have been added to a group of alerts that have been previously alerted on. This setting is usually set to five minutes or more. - During high alert volume periods, AlertManager will send alerts at each `group_interval`, which can mean a lot of distraction. Grafana OnCall grouping will help manage this in the following ways: + During high alert volume periods, AlertManager will send alerts at each `group_interval`, which can mean a lot of distraction. Grafana OnCall grouping will help manage this in the following ways: - - Grafana OnCall groups alerts based on the first label of each alert. + - Grafana OnCall groups alerts based on the first label of each alert. - - Grafana OnCall marks an incident as resolved only when the amount of grouped alerts with state `resolved` equals the amount of alerts with state `firing`. + - Grafana OnCall marks an incident as resolved only when the amount of grouped alerts with state `resolved` equals the amount of alerts with state `firing`. 
diff --git a/docs/sources/integrations/available-integrations /add-grafana-alerting.md b/docs/sources/integrations/available-integrations /add-grafana-alerting.md index 885e9be6..0e8519dd 100644 --- a/docs/sources/integrations/available-integrations /add-grafana-alerting.md +++ b/docs/sources/integrations/available-integrations /add-grafana-alerting.md @@ -1,9 +1,16 @@ -+++ -title = "Connect Grafana Alerting to Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Prometheus"] -aliases = ["/docs/grafana-cloud/oncall/integrations/add-grafana-alerting/"] -weight = 100 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/integrations/add-grafana-alerting/ + - /docs/oncall/latest/integrations/available-integrations /add-grafana-alerting/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - Prometheus +title: Connect Grafana Alerting to Grafana OnCall +weight: 100 +--- # Connect Grafana Alerting to Grafana OnCall @@ -13,7 +20,7 @@ Grafana Alerting for Grafana OnCall can be set up using two methods: - Grafana (Other Grafana): Grafana OnCall is connected to one or more Grafana instances separate from the one being used to manage Grafana OnCall. -## Configure Grafana Alerting for Grafana OnCall +## Configure Grafana Alerting for Grafana OnCall You must have an Admin role to create integrations in Grafana OnCall. @@ -23,21 +30,21 @@ You must have an Admin role to create integrations in Grafana OnCall. 3. Follow the configuration steps that display in the **How to connect** window to retrieve your unique integration URL and complete any necessary configurations. - ### Configure Grafana Cloud Alerting + Use the following method if you are connecting Grafana OnCall with alerts coming from the same Grafana instance from which Grafana OnCall is being managed. 1. In Grafana OnCall, navigate to the **Integrations** tab and select **New Integration for receiving alerts**. -1. Click **Quick connect** in the **Grafana Alerting** tile. 
This will automatically create the integration in Grafana OnCall as well as the required contact point in Alerting. +1. Click **Quick connect** in the **Grafana Alerting** tile. This will automatically create the integration in Grafana OnCall as well as the required contact point in Alerting. - >**Note:** You must connect the contact point with a notification policy. For more information, see [Contact points in Grafana Alerting](https://grafana.com/docs/grafana/latest/alerting/unified-alerting/contact-points/) + > **Note:** You must connect the contact point with a notification policy. For more information, see [Contact points in Grafana Alerting](https://grafana.com/docs/grafana/latest/alerting/unified-alerting/contact-points/) -1. Determine the escalation chain for the new integration by either selecting an existing one or by creating a new escalation chain. +1. Determine the escalation chain for the new integration by either selecting an existing one or by creating a new escalation chain. -2. In Grafana Cloud Alerting, navigate to **Alerting > Contact Points** and find a contact point with a name matching the integration you created in Grafana OnCall. +1. In Grafana Cloud Alerting, navigate to **Alerting > Contact Points** and find a contact point with a name matching the integration you created in Grafana OnCall. -3. Click the **Edit** (pencil) icon, then click **Test**. This will send a test alert to Grafana OnCall. +1. Click the **Edit** (pencil) icon, then click **Test**. This will send a test alert to Grafana OnCall. ### Configure Grafana (Other Grafana) @@ -57,6 +64,6 @@ Connect Grafana OnCall with alerts coming from a Grafana instance that is differ 7. Choose the contact point type `webhook`, then paste the URL generated in step 3 into the URL field. - >**Note:** You must connect the contact point with a notification policy. 
For more information, see [Contact points in Grafana Alerting](https://grafana.com/docs/grafana/latest/alerting/unified-alerting/contact-points/). + > **Note:** You must connect the contact point with a notification policy. For more information, see [Contact points in Grafana Alerting](https://grafana.com/docs/grafana/latest/alerting/unified-alerting/contact-points/). -8. Click the **Edit** (pencil) icon, then click **Test**. This will send a test alert to Grafana OnCall. \ No newline at end of file +8. Click the **Edit** (pencil) icon, then click **Test**. This will send a test alert to Grafana OnCall. diff --git a/docs/sources/integrations/available-integrations /add-zabbix.md b/docs/sources/integrations/available-integrations /add-zabbix.md index 26b04f65..a85e2477 100644 --- a/docs/sources/integrations/available-integrations /add-zabbix.md +++ b/docs/sources/integrations/available-integrations /add-zabbix.md @@ -1,14 +1,20 @@ -+++ -title = "Connect Zabbix to Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Zabbix"] -weight = 500 -+++ +--- +aliases: + - /docs/oncall/latest/integrations/available-integrations /add-zabbix/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - Zabbix +title: Connect Zabbix to Grafana OnCall +weight: 500 +--- # Connect Zabbix to Grafana OnCall Zabbix is an open-source monitoring software tool for diverse IT components, including networks, servers, virtual machines, and cloud services. Zabbix provides monitoring for metrics such as network utilization, CPU load, and disk space consumption. - ## Configure Zabbix integration for Grafana OnCall This integration is available for Grafana Cloud OnCall. You must have an Admin role to create integrations in Grafana OnCall. @@ -17,86 +23,92 @@ This integration is available for Grafana Cloud OnCall. You must have an Admin r 2. Select **Zabbix** from the list of available integrations 3. 
Follow the instructions in the **How to connect** window to get your unique integration URL and review next steps. - ## Configure the Zabbix server 1. Deploy a Zabbix playground if you don't have one set up: - ```bash - docker run --name zabbix-appliance -t \ - -p 10051:10051 \ - -p 80:80 \ - -d zabbix/zabbix-appliance:latest - ``` + + ```bash + docker run --name zabbix-appliance -t \ + -p 10051:10051 \ + -p 80:80 \ + -d zabbix/zabbix-appliance:latest + ``` 1. Establish an ssh connection to a Zabbix server. - ```bash - docker exec -it zabbix-appliance bash - ``` + ```bash + docker exec -it zabbix-appliance bash + ``` 1. Place the [grafana_oncall.sh](#grafana_oncallsh-script) script in the `AlertScriptsPath` directory specified within the Zabbix server configuration file (zabbix_server.conf). - ```bash - grep AlertScriptsPath /etc/zabbix/zabbix_server.conf - ``` - >**Note:** The script must be executable by the user running the zabbix_server binary (usually "zabbix") on the Zabbix server. For example, `chmod +x grafana_oncall.sh` + ```bash + grep AlertScriptsPath /etc/zabbix/zabbix_server.conf + ``` - ``` bash - ls -lh /usr/lib/zabbix/alertscripts/grafana_oncall.sh - -rw-r--r-- 1 root root 1.5K Jun 6 07:52 /usr/lib/zabbix/alertscripts/grafana_oncall.sh - ``` + > **Note:** The script must be executable by the user running the zabbix_server binary (usually "zabbix") on the Zabbix server. For example, `chmod +x grafana_oncall.sh` + + ```bash + ls -lh /usr/lib/zabbix/alertscripts/grafana_oncall.sh + -rw-r--r-- 1 root root 1.5K Jun 6 07:52 /usr/lib/zabbix/alertscripts/grafana_oncall.sh + ``` ## Configure Zabbix alerts + Within Zabbix web interface, do the following: 1. In a browser, open localhost:80. 1. Navigate to **Adminitstration > Media Types > Create Media Type**. - + 1. Create a Media Type with the following fields. 
- * Name: Grafana OnCall - * Type: script - * Script parameters: - * {ALERT.SENDTO} - * {ALERT.SUBJECT} - * {ALERT.MESSAGE} - + - Name: Grafana OnCall + - Type: script + - Script parameters: + - {ALERT.SENDTO} + - {ALERT.SUBJECT} + - {ALERT.MESSAGE} + + ### Set the {ALERT.SEND_TO} value + To send alerts to Grafana OnCall, the {ALERT.SEND_TO} value must be set in the [user media configuration](https://www.zabbix.com/documentation/3.4/manual/config/notifications/media/script#user_media). -1. In the web UI, navigate to **Administration > Users** and open the **user properties** form. +1. In the web UI, navigate to **Administration > Users** and open the **user properties** form. 1. In the **Media** tab, click **Add** and copy the link from Grafana OnCall in the `Send to` field. - + 1. Click **Test** in the last column to send a test alert to Grafana OnCall. - + 1. Specify **Send to** OnCall using the unique integration URL from the above step in the testing window that opens. -Create a test message with a body and optional subject and click **Test**. - + Create a test message with a body and optional subject and click **Test**. + ## Grouping and auto-resolve of Zabbix notifications + Grafana OnCall provides grouping and auto-resolve of Zabbix notifications. Use the following procedure to configure grouping and auto-resolve. -1. Provide a parameter as an identifier for group differentiation to Grafana OnCall. +1. Provide a parameter as an identifier for group differentiation to Grafana OnCall. -1. Append that variable to the subject of the action as `ONCALL_GROUP: ID`, where `ID` is any of the Zabbix [macros](https://www.zabbix.com/documentation/4.2/manual/appendix/macros/supported_by_location). -For example, `{EVENT.ID}`. The Grafana OnCall script [grafana_oncall.sh](#grafana_oncallsh-script) extracts this event and passes the `alert_uid` to Grafana OnCall. +1. 
Append that variable to the subject of the action as `ONCALL_GROUP: ID`, where `ID` is any of the Zabbix [macros](https://www.zabbix.com/documentation/4.2/manual/appendix/macros/supported_by_location). + For example, `{EVENT.ID}`. The Grafana OnCall script [grafana_oncall.sh](#grafana_oncallsh-script) extracts this event and passes the `alert_uid` to Grafana OnCall. 1. To enable auto-resolve within Grafana Oncall, the "Resolved" keyword is required in the **Default subject** field in **Recovered operations**. - + ## grafana_oncall.sh script + ```bash #!/bin/bash # This is the modification of original ericos's shell script. @@ -137,4 +149,5 @@ return=$(curl $url -d "${payload}" -H "Content-Type: application/json" -X POST) ``` ## More Information -For more information on Zabbix scripts, see [scripts for notifications](https://www.zabbix.com/documentation/4.2/manual/config/notifications/media/script). \ No newline at end of file + +For more information on Zabbix scripts, see [scripts for notifications](https://www.zabbix.com/documentation/4.2/manual/config/notifications/media/script). diff --git a/docs/sources/integrations/configure-outgoing-webhooks.md b/docs/sources/integrations/configure-outgoing-webhooks.md index 4a0b56ab..dda09988 100644 --- a/docs/sources/integrations/configure-outgoing-webhooks.md +++ b/docs/sources/integrations/configure-outgoing-webhooks.md @@ -1,37 +1,45 @@ -+++ -title = "Configure outgoing webhooks for Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "amixr", "webhooks"] -weight = 500 -+++ +--- +aliases: + - /docs/oncall/latest/integrations/configure-outgoing-webhooks/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - amixr + - webhooks +title: Configure outgoing webhooks for Grafana OnCall +weight: 500 +--- # Configure outgoing webhooks for Grafana OnCall -Outgoing webhooks allow you to send alert details to a specified URL from Grafana OnCall. 
Once an outgoing webhook is configured, you can use it as a notification method in escalation chains. +Outgoing webhooks allow you to send alert details to a specified URL from Grafana OnCall. Once an outgoing webhook is configured, you can use it as a notification method in escalation chains. +To automatically send alert data to a destination URL via outgoing webhook: -To automatically send alert data to a destination URL via outgoing webhook: 1. In Grafana OnCall, navigate to **Outgoing Webhooks** and click **+ Create**. - This is also the place to edit and delete existing outgoing webhooks. + This is also the place to edit and delete existing outgoing webhooks. 2. Provide a name for your outgoing webhook and enter the destination URL. 3. If the destination requires authentication, enter your credentials. - You can enter a username and password (HTTP) or an authorization header formatted in JSON. + You can enter a username and password (HTTP) or an authorization header formatted in JSON. -4. Configure the webhook payload in the **Data** field. - +4. Configure the webhook payload in the **Data** field. 5. Click **Create Webhook**. The format you use to call the variables must match the structure of how the fields are nested in the alert payload. The **Data** field can use the following four variables to auto-populate the webhook payload with information about the first alert in the alert group: + - `{{ alert_title }}` - `{{ alert_message }}` -- `{{ alert_url }}` +- `{{ alert_url }}` - `{{ alert_payload }}` -
+
`alert_payload` is always the first level of any variable you want to call. -The following is an example of an entry in the **Data** field that might return an alert name and description. +The following is an example of an entry in the **Data** field that might return an alert name and description. ```json { @@ -40,5 +48,4 @@ The following is an example of an entry in the **Data** field that might return } ``` ->**NOTE:** If you receive an error message and cannot create an outgoing webhook, verify that your JSON is formatted correctly. - +> **NOTE:** If you receive an error message and cannot create an outgoing webhook, verify that your JSON is formatted correctly. diff --git a/docs/sources/integrations/create-custom-templates.md b/docs/sources/integrations/create-custom-templates.md index 9d2add94..676278db 100644 --- a/docs/sources/integrations/create-custom-templates.md +++ b/docs/sources/integrations/create-custom-templates.md @@ -1,12 +1,19 @@ -+++ -title = "Configure alerts in Grafana OnCall" -keywords = ["Grafana Cloud", "Alerts", "Notifications", "on-call", "Jinja"] -weight = 300 -+++ +--- +aliases: + - /docs/oncall/latest/integrations/create-custom-templates/ +keywords: + - Grafana Cloud + - Alerts + - Notifications + - on-call + - Jinja +title: Configure alerts in Grafana OnCall +weight: 300 +--- # Configure alerts in Grafana OnCall - Grafana OnCall can integrate with any monitoring systems that can send alerts using webhooks with JSON payloads. By default, webhooks deliver raw JSON payloads. When Grafana OnCall receives an alert and parses its payload, a default pre configured alert template is applied to modify the alert payload to be more human readable. These alert templates are customizable for any integration. +Grafana OnCall can integrate with any monitoring systems that can send alerts using webhooks with JSON payloads. By default, webhooks deliver raw JSON payloads. 
When Grafana OnCall receives an alert and parses its payload, a default pre configured alert template is applied to modify the alert payload to be more human readable. These alert templates are customizable for any integration. See Format alerts with alert templates in this document to learn more about how to customize alert templates. @@ -24,83 +31,88 @@ Alerts received by Grafana OnCall contain metadata as keys and values in a JSON ```json { - "dashboardId":1, - "title":"[Alerting] Panel Title alert", - "message":"Notification Message", - "evalMatches":[ + "dashboardId": 1, + "title": "[Alerting] Panel Title alert", + "message": "Notification Message", + "evalMatches": [ { - "value":1, - "metric":"Count", - "tags":{} + "value": 1, + "metric": "Count", + "tags": {} } ], - "imageUrl":"https://grafana.com/static/assets/img/blog/mixed_styles.png", - "orgId":1, - "panelId":2, - "ruleId":1, - "ruleName":"Panel Title alert", - "ruleUrl":"http://localhost:3000/d/hZ7BuVbWz/test-dashboard?fullscreen\u0026edit\u0026tab=alert\u0026panelId=2\u0026orgId=1", - "state":"alerting", - "tags":{ - "tag name":"tag value" + "imageUrl": "https://grafana.com/static/assets/img/blog/mixed_styles.png", + "orgId": 1, + "panelId": 2, + "ruleId": 1, + "ruleName": "Panel Title alert", + "ruleUrl": "http://localhost:3000/d/hZ7BuVbWz/test-dashboard?fullscreen\u0026edit\u0026tab=alert\u0026panelId=2\u0026orgId=1", + "state": "alerting", + "tags": { + "tag name": "tag value" } } ``` In Grafana OnCall every alert and alert group has the following fields: + - `Title`, `message` and `image url` - `Grouping Id` - `Resolve Signal` -The JSON payload is converted. For example: -* `{{ payload.title }}` -> Title -* `{{ payload.message }}` -> Message -* `{{ payload.imageUrl }}` -> Image Url +The JSON payload is converted. 
For example: + +- `{{ payload.title }}` -> Title +- `{{ payload.message }}` -> Message +- `{{ payload.imageUrl }}` -> Image Url The result is that each field of the alert in OnCall is now mapped to the JSON payload keys. This also true for the alert behavior: -* `{{ payload.ruleId }}` -> Grouping Id -* `{{ 1 if payload.state == 'OK' else 0 }}` -> Resolve Signal +- `{{ payload.ruleId }}` -> Grouping Id +- `{{ 1 if payload.state == 'OK' else 0 }}` -> Resolve Signal Grafana OnCall provides a pre configured default Jinja template for supported integrations. If your monitoring system is not in the Grafana OnCall integrations list, you can create a generic `webhook` integration, send an alert, and configure your templates. +## Customize alerts with alert templates -## Customize alerts with alert templates - -Alert templates allow you to format any alert fields recognized by Grafana OnCall. You can customize default alert templates for all the different ways you receive your alerts such as web, slack, SMS, and email. For more advanced customization, use Jinja templates. +Alert templates allow you to format any alert fields recognized by Grafana OnCall. You can customize default alert templates for all the different ways you receive your alerts such as web, slack, SMS, and email. For more advanced customization, use Jinja templates. As a best practice, add _Playbooks_, _Useful links_, or _Checklists_ to the alert message. To customize alert templates in Grafana OnCall: -1. Navigate to the **Integrations** tab, select the integration, then click **Change alert template and grouping**. +1. Navigate to the **Integrations** tab, select the integration, then click **Change alert template and grouping**. 2. In Alert Templates, select a template from the **Edit template for** dropdown. 3. 
Edit the Appearances template as needed: - * `Title`, `Message`, `Image url` for Web - * `Title`, `Message`, `Image url` for Slack - * `Title` used for SMS - * `Title` used for Phone - * `Title`, `Message` used for Email + + - `Title`, `Message`, `Image url` for Web + - `Title`, `Message`, `Image url` for Slack + - `Title` used for SMS + - `Title` used for Phone + - `Title`, `Message` used for Email 4. Edit the alert behavior as needed: - * `Grouping Id` - This output groups other alerts into a single alert group. - * `Acknowledge Condition` - The output should be `ok`, `true`, or `1` to auto-acknowledge the alert group. For example, `{{ 1 if payload.state == 'OK' else 0 }}`. - * `Resolve Condition` - The output should be `ok`, `true` or `1` to auto-resolve the alert group. For example, `{{ 1 if payload.state == 'OK' else 0 }}`. - * `Source Link` - Used to customize the URL link to provide as the "source" of the alert. + - `Grouping Id` - This output groups other alerts into a single alert group. + - `Acknowledge Condition` - The output should be `ok`, `true`, or `1` to auto-acknowledge the alert group. For example, `{{ 1 if payload.state == 'OK' else 0 }}`. + - `Resolve Condition` - The output should be `ok`, `true` or `1` to auto-resolve the alert group. For example, `{{ 1 if payload.state == 'OK' else 0 }}`. + - `Source Link` - Used to customize the URL link to provide as the "source" of the alert. ## Advanced Jinja templates - Grafana OnCall uses [Jinja templating language](http://jinja.pocoo.org/docs/2.10/) to format alert groups for the Web, Slack, phone calls, SMS messages, and more because the JSON format is not easily readable by humans. As a result, you can decide what you want to see when an alert group is triggered as well as how it should be presented. 
- + +Grafana OnCall uses [Jinja templating language](http://jinja.pocoo.org/docs/2.10/) to format alert groups for the Web, Slack, phone calls, SMS messages, and more because the JSON format is not easily readable by humans. As a result, you can decide what you want to see when an alert group is triggered as well as how it should be presented. + Jinja2 offers simple but multi-faceted functionality by using loops, conditions, functions, and more. -> **NOTE:** Every alert from a monitoring system comes in the key/value format. +> **NOTE:** Every alert from a monitoring system comes in the key/value format. + Grafana OnCall has rules about which of the keys match to: `__title`, `message`, `image`, `grouping`, and `auto-resolve__`. ### Loops Monitoring systems can send an array of values. In this example, you can use Jinja to iterate and format the alert using a Grafana example: + ```.jinja2 *Values:* {% for evalMatch in payload.evalMatches -%} @@ -109,9 +121,10 @@ Monitoring systems can send an array of values. In this example, you can use Jin ``` ### Conditions + You can add instructions if an alert comes from a specified Grafana alert rule: -```jinja2 +````jinja2 {% if payload.ruleId == '1' -%} *Alert TODOs* 1. Get acess to the container @@ -122,22 +135,26 @@ You can add instructions if an alert comes from a specified Grafana alert rule: 3. Open the container and reload caches. 4. Click Custom Button `Send to Jira` {%- endif -%} -``` +```` ### Built-in Jinja functions Jinja2 includes built-in functions that can also be used in Grafana OnCall. 
For example: + ```.jinja2 {{ payload | tojson_pretty }} ``` + Built-in functions: -* `abs` -* `capitalize` -* `trim` -* You can see the full list of Jinja built-in functions on github [here](https://github.com/pallets/jinja/blob/3915eb5c2a7e2e4d49ebdf0ecb167ea9c21c60b2/src/jinja2/filters.py#L1307) + +- `abs` +- `capitalize` +- `trim` +- You can see the full list of Jinja built-in functions on github [here](https://github.com/pallets/jinja/blob/3915eb5c2a7e2e4d49ebdf0ecb167ea9c21c60b2/src/jinja2/filters.py#L1307) ### Functions added by Grafana OnCall -* `time` - current time -* `tojson_pretty` - JSON prettified -* `iso8601_to_time` - converts time from iso8601 (`2015-02-17T18:30:20.000Z`) to datetime -* `datetimeformat` - converts time from datetime to the given format (`%H:%M / %d-%m-%Y` by default) + +- `time` - current time +- `tojson_pretty` - JSON prettified +- `iso8601_to_time` - converts time from iso8601 (`2015-02-17T18:30:20.000Z`) to datetime +- `datetimeformat` - converts time from datetime to the given format (`%H:%M / %d-%m-%Y` by default) diff --git a/docs/sources/oncall-api-reference/_index.md b/docs/sources/oncall-api-reference/_index.md index 2fee042b..b696b0fc 100644 --- a/docs/sources/oncall-api-reference/_index.md +++ b/docs/sources/oncall-api-reference/_index.md @@ -1,7 +1,9 @@ -+++ -title = "Grafana OnCall HTTP API reference" -weight = 1300 -+++ +--- +aliases: + - /docs/oncall/latest/oncall-api-reference/ +title: Grafana OnCall HTTP API reference +weight: 1300 +--- # HTTP API Reference @@ -23,7 +25,7 @@ curl "api_endpoint_here" --header "Authorization: meowmeowmeow" ``` Note that `meowmeowmeow` is a valid key for test purposes. -Replace `meowmeowmeow` with your API key in production. +Replace `meowmeowmeow` with your API key in production. Grafana OnCall uses API keys to allow access to the API. You can request a new OnCall API key in the API section. @@ -31,33 +33,34 @@ An API key is specific to a user and a Grafana stack. 
If you want to switch to a ## Pagination -List endpoints such as List Integrations or List Alert Groups return multiple objects. +List endpoints such as List Integrations or List Alert Groups return multiple objects. -The OnCall API returns them in pages. Note that the page size may vary. +The OnCall API returns them in pages. Note that the page size may vary. -| Parameter | Meaning | -|-----------|:-------:| -`count` | The total number of items. It can be `0` if a request does not return any data. -`next` | A link to the next page. It can be `null` if the next page does not contain any data. -`previous` | A link to the previous page. It can be `null` if the previous page does not contain any data. -`results` | The data list. Can be `[]` if a request does not return any data. +| Parameter | Meaning | +| ---------- | :-------------------------------------------------------------------------------------------: | +| `count` | The total number of items. It can be `0` if a request does not return any data. | +| `next` | A link to the next page. It can be `null` if the next page does not contain any data. | +| `previous` | A link to the previous page. It can be `null` if the previous page does not contain any data. | +| `results` | The data list. Can be `[]` if a request does not return any data. | ## Rate Limits Grafana OnCall provides rate limits to ensure alert group notifications will be delivered to your Slack workspace even when some integrations produce a large number of alerts. 
### Monitoring integrations Rate Limits + Rate limited response HTTP status: 429 - | Scope | Amount | Time Frame | -|------------------------------|:------:|:----------:| -| Alerts from each integration | 300 | 5 minutes | -| Alerts from the whole team | 500 | 5 minutes | +| ---------------------------- | :----: | :--------: | +| Alerts from each integration | 300 | 5 minutes | +| Alerts from the whole team | 500 | 5 minutes | ## API rate limits + You can reduce or increase rate limits depending on platform status. | Scope | Amount | Time Frame | -|--------------------------|:------:|:--------:| -| API requests per API key | 300 | 5 minutes | \ No newline at end of file +| ------------------------ | :----: | :--------: | +| API requests per API key | 300 | 5 minutes | diff --git a/docs/sources/oncall-api-reference/alertgroups.md b/docs/sources/oncall-api-reference/alertgroups.md index 4094f0b3..9ebec034 100644 --- a/docs/sources/oncall-api-reference/alertgroups.md +++ b/docs/sources/oncall-api-reference/alertgroups.md @@ -1,8 +1,10 @@ -+++ -title = "Alert groups HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/alertgroups/"] -weight = 400 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/alertgroups/ + - /docs/oncall/latest/oncall-api-reference/alertgroups/ +title: Alert groups HTTP API +weight: 400 +--- # List alert groups @@ -10,36 +12,36 @@ weight = 400 curl "{{API_URL}}/api/v1/alert_groups/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + --header "Content-Type: application/json" ``` The above command returns JSON structured in the following way: ```json { - "count": 1, - "next": null, - "previous": null, - "results": [ - { - "id": "I68T24C13IFW1", - "integration_id": "CFRPV98RPR1U8", - "route_id": "RIYGUJXCPFHXY", - "alerts_count": 3, - "state": "resolved", - "created_at": "2020-05-19T12:37:01.430444Z", - "resolved_at": "2020-05-19T13:37:01.429805Z", - 
"acknowledged_at": null, - "title": "Memory above 90% threshold" - } - ] + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "id": "I68T24C13IFW1", + "integration_id": "CFRPV98RPR1U8", + "route_id": "RIYGUJXCPFHXY", + "alerts_count": 3, + "state": "resolved", + "created_at": "2020-05-19T12:37:01.430444Z", + "resolved_at": "2020-05-19T13:37:01.429805Z", + "acknowledged_at": null, + "title": "Memory above 90% threshold" + } + ] } ``` These available filter parameters should be provided as `GET` arguments: -* `route_id` -* `integration_id` +- `route_id` +- `integration_id` **HTTP request** @@ -57,12 +59,12 @@ curl "{{API_URL}}/api/v1/alert_groups/I68T24C13IFW1/" \ }' ``` -|Parameter | Required | Description | -|--------- |:--------:|:------------| -`mode` | No | Default setting is `wipe`. `wipe` will remove the payload of all Grafana OnCall group alerts. This is useful if you sent sensitive data to OnCall. All metadata will remain. `DELETE` will trigger the removal of alert groups, alerts, and all related metadata. It will also remove alert group notifications in Slack and other destinations. +| Parameter | Required | Description | +| --------- | :------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `mode` | No | Default setting is `wipe`. `wipe` will remove the payload of all Grafana OnCall group alerts. This is useful if you sent sensitive data to OnCall. All metadata will remain. `DELETE` will trigger the removal of alert groups, alerts, and all related metadata. It will also remove alert group notifications in Slack and other destinations. 
| ->**NOTE:** `DELETE` can take a few moments to delete alert groups because Grafana OnCall interacts with 3rd party APIs such as Slack. Please check objects using `GET` to be sure the data is removed. +> **NOTE:** `DELETE` can take a few moments to delete alert groups because Grafana OnCall interacts with 3rd party APIs such as Slack. Please check objects using `GET` to be sure the data is removed. **HTTP request** -`DELETE {{API_URL}}/api/v1/alert_groups/` \ No newline at end of file +`DELETE {{API_URL}}/api/v1/alert_groups/` diff --git a/docs/sources/oncall-api-reference/alerts.md b/docs/sources/oncall-api-reference/alerts.md index f0c65a38..ca67add1 100644 --- a/docs/sources/oncall-api-reference/alerts.md +++ b/docs/sources/oncall-api-reference/alerts.md @@ -1,8 +1,10 @@ -+++ -title = "Alerts HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/alerts"] -weight = 100 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/alerts/ + - /docs/oncall/latest/oncall-api-reference/alerts/ +title: Alerts HTTP API +weight: 100 +--- # List Alerts @@ -10,101 +12,101 @@ weight = 100 curl "{{API_URL}}/api/v1/alerts/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + --header "Content-Type: application/json" ``` The above command returns JSON structured in the following way: ```json { - "count": 3, - "next": null, - "previous": null, - "results": [ - { - "id": "AA74DN7T4JQB6", - "alert_group_id": "I68T24C13IFW1", - "created_at": "2020-05-11T20:07:43Z", - "payload": { - "state": "alerting", - "title": "[Alerting] Test notification", - "ruleId": 0, - "message": "Someone is testing the alert notification within Grafana.", - "ruleUrl": "{{API_URL}}/", - "ruleName": "Test notification", - "evalMatches": [ - { - "tags": null, - "value": 100, - "metric": "High value" - }, - { - "tags": null, - "value": 200, - "metric": "Higher Value" - } - ] - } - }, - { - "id": "AR9SSYFKE2PV7", - 
"alert_group_id": "I68T24C13IFW1", - "created_at": "2020-05-11T20:07:54Z", - "payload": { - "state": "alerting", - "title": "[Alerting] Test notification", - "ruleId": 0, - "message": "Someone is testing the alert notification within Grafana.", - "ruleUrl": "{{API_URL}}/", - "ruleName": "Test notification", - "evalMatches": [ - { - "tags": null, - "value": 100, - "metric": "High value" - }, - { - "tags": null, - "value": 200, - "metric": "Higher Value" - } - ] - } - }, - { - "id": "AWJQSGEYYUFGH", - "alert_group_id": "I68T24C13IFW1", - "created_at": "2020-05-11T20:07:58Z", - "payload": { - "state": "alerting", - "title": "[Alerting] Test notification", - "ruleId": 0, - "message": "Someone is testing the alert notification within Grafana.", - "ruleUrl": "{{API_URL}}/", - "ruleName": "Test notification", - "evalMatches": [ - { - "tags": null, - "value": 100, - "metric": "High value" - }, - { - "tags": null, - "value": 200, - "metric": "Higher Value" - } - ] - } - } - ] + "count": 3, + "next": null, + "previous": null, + "results": [ + { + "id": "AA74DN7T4JQB6", + "alert_group_id": "I68T24C13IFW1", + "created_at": "2020-05-11T20:07:43Z", + "payload": { + "state": "alerting", + "title": "[Alerting] Test notification", + "ruleId": 0, + "message": "Someone is testing the alert notification within Grafana.", + "ruleUrl": "{{API_URL}}/", + "ruleName": "Test notification", + "evalMatches": [ + { + "tags": null, + "value": 100, + "metric": "High value" + }, + { + "tags": null, + "value": 200, + "metric": "Higher Value" + } + ] + } + }, + { + "id": "AR9SSYFKE2PV7", + "alert_group_id": "I68T24C13IFW1", + "created_at": "2020-05-11T20:07:54Z", + "payload": { + "state": "alerting", + "title": "[Alerting] Test notification", + "ruleId": 0, + "message": "Someone is testing the alert notification within Grafana.", + "ruleUrl": "{{API_URL}}/", + "ruleName": "Test notification", + "evalMatches": [ + { + "tags": null, + "value": 100, + "metric": "High value" + }, + { + "tags": null, + 
"value": 200, + "metric": "Higher Value" + } + ] + } + }, + { + "id": "AWJQSGEYYUFGH", + "alert_group_id": "I68T24C13IFW1", + "created_at": "2020-05-11T20:07:58Z", + "payload": { + "state": "alerting", + "title": "[Alerting] Test notification", + "ruleId": 0, + "message": "Someone is testing the alert notification within Grafana.", + "ruleUrl": "{{API_URL}}/", + "ruleName": "Test notification", + "evalMatches": [ + { + "tags": null, + "value": 100, + "metric": "High value" + }, + { + "tags": null, + "value": 200, + "metric": "Higher Value" + } + ] + } + } + ] } ``` The following available filter parameters should be provided as `GET` arguments: -* `alert_group_id` -* `search`—string-based inclusion search by alert payload +- `alert_group_id` +- `search`—string-based inclusion search by alert payload **HTTP request** -`GET {{API_URL}}/api/v1/alerts/` \ No newline at end of file +`GET {{API_URL}}/api/v1/alerts/` diff --git a/docs/sources/oncall-api-reference/escalation_chains.md b/docs/sources/oncall-api-reference/escalation_chains.md index e11d59dc..24a7eb50 100644 --- a/docs/sources/oncall-api-reference/escalation_chains.md +++ b/docs/sources/oncall-api-reference/escalation_chains.md @@ -1,8 +1,10 @@ -+++ -title = "Escalation Chains HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/escalation_chains"] -weight = 200 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/escalation_chains/ + - /docs/oncall/latest/oncall-api-reference/escalation_chains/ +title: Escalation Chains HTTP API +weight: 200 +--- # Create an escalation chain @@ -20,16 +22,16 @@ The above command returns JSON structured in the following way: ```json { - "id": "FWDL7M6N6I9HE", - "name": "example-chain", - "team_id": null + "id": "FWDL7M6N6I9HE", + "name": "example-chain", + "team_id": null } ``` -| Parameter | Required | Description | -|-----------|:--------:|:------------| -| name | yes | Name of the escalation chain | -| team_id | no | ID of the team | +| 
Parameter | Required | Description | +| --------- | :------: | :--------------------------- | +| name | yes | Name of the escalation chain | +| team_id | no | ID of the team | **HTTP request** @@ -48,9 +50,9 @@ The above command returns JSON structured in the following way: ```json { - "id": "F5JU6KJET33FE", - "name": "default", - "team_id": null + "id": "F5JU6KJET33FE", + "name": "default", + "team_id": null } ``` @@ -64,23 +66,23 @@ The above command returns JSON structured in the following way: curl "{{API_URL}}/api/v1/escalation_chains/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + --header "Content-Type: application/json" ``` The above command returns JSON structured in the following way: ```json { - "count": 2, - "next": null, - "previous": null, - "results": [ - { - "id": "F5JU6KJET33FE", - "name": "default", - "team_id": null - } - ] + "count": 2, + "next": null, + "previous": null, + "results": [ + { + "id": "F5JU6KJET33FE", + "name": "default", + "team_id": null + } + ] } ``` @@ -99,4 +101,4 @@ curl "{{API_URL}}/api/v1/escalation_chains/F5JU6KJET33FE/" \ **HTTP request** -`DELETE {{API_URL}}/api/v1/escalation_chains//` \ No newline at end of file +`DELETE {{API_URL}}/api/v1/escalation_chains//` diff --git a/docs/sources/oncall-api-reference/escalation_policies.md b/docs/sources/oncall-api-reference/escalation_policies.md index c2ee1ffb..bc1262e4 100644 --- a/docs/sources/oncall-api-reference/escalation_policies.md +++ b/docs/sources/oncall-api-reference/escalation_policies.md @@ -1,8 +1,10 @@ -+++ -title = "Escalation Policies HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/escalation_policies"] -weight = 300 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/escalation_policies/ + - /docs/oncall/latest/oncall-api-reference/escalation_policies/ +title: Escalation Policies HTTP API +weight: 300 +--- # Create an escalation policy @@ -22,28 +24,28 @@ The 
above command returns JSON structured in the following way: ```json { - "id": "E3GA6SJETWWJS", - "escalation_chain_id": "F5JU6KJET33FE", - "position": 0, - "type": "wait", - "duration": 60 + "id": "E3GA6SJETWWJS", + "escalation_chain_id": "F5JU6KJET33FE", + "position": 0, + "type": "wait", + "duration": 60 } ``` -|Parameter | Required | Description | -|----------|:--------:|:------------| -`escalation_chain_id` | Yes | Each escalation policy is assigned to a specific escalation chain. -`position` | Optional | Escalation policies execute one after another starting from `position=0`. `Position=-1` will put the escalation policy to the end of the list. A new escalation policy created with a position of an existing escalation policy will move the old one (and all following) down in the list. -`type` | Yes | One of: `wait`, `notify_persons`, `notify_person_next_each_time`, `notify_on_call_from_schedule`, `notify_user_group`, `trigger_action`, `resolve`, `notify_whole_channel`, `notify_if_time_from_to`. -`duration` | Optional | The duration, in seconds, when type `wait` is chosen. -`important` | Optional | Default is `false`. Will assign "important" to personal notification rules if `true`. This can be used to distinguish alerts on which you want to be notified immediately by phone. Applicable for types `notify_persons`, `notify_on_call_from_schedule`, and `notify_user_group`. -`action_to_trigger` | If type = `trigger_action` | ID of an action, or webhook. -`group_to_notify` | If type = `notify_user_group` | ID of a `User Group`. -`persons_to_notify` | If type = `notify_persons` | List of user IDs. -`persons_to_notify_next_each_time` | If type = `notify_person_next_each_time` | List of user IDs. -`notify_on_call _from_schedule` | If type = `notify_on_call_from_schedule` | ID of a Schedule. -`notify_if_time_from` | If type = `notify_if_time_from_to` | UTC time represents the beginning of the time period, for example `09:00:00Z`. 
-`notify_if_time_to` | If type = `notify_if_time_from_to` | UTC time represents the end of the time period, for example `18:00:00Z`. +| Parameter | Required | Description | +| ---------------------------------- | :--------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `escalation_chain_id` | Yes | Each escalation policy is assigned to a specific escalation chain. | +| `position` | Optional | Escalation policies execute one after another starting from `position=0`. `Position=-1` will put the escalation policy to the end of the list. A new escalation policy created with a position of an existing escalation policy will move the old one (and all following) down in the list. | +| `type` | Yes | One of: `wait`, `notify_persons`, `notify_person_next_each_time`, `notify_on_call_from_schedule`, `notify_user_group`, `trigger_action`, `resolve`, `notify_whole_channel`, `notify_if_time_from_to`. | +| `duration` | Optional | The duration, in seconds, when type `wait` is chosen. | +| `important` | Optional | Default is `false`. Will assign "important" to personal notification rules if `true`. This can be used to distinguish alerts on which you want to be notified immediately by phone. Applicable for types `notify_persons`, `notify_on_call_from_schedule`, and `notify_user_group`. | +| `action_to_trigger` | If type = `trigger_action` | ID of an action, or webhook. | +| `group_to_notify` | If type = `notify_user_group` | ID of a `User Group`. | +| `persons_to_notify` | If type = `notify_persons` | List of user IDs. | +| `persons_to_notify_next_each_time` | If type = `notify_person_next_each_time` | List of user IDs. 
| +| `notify_on_call _from_schedule` | If type = `notify_on_call_from_schedule` | ID of a Schedule. | +| `notify_if_time_from` | If type = `notify_if_time_from_to` | UTC time represents the beginning of the time period, for example `09:00:00Z`. | +| `notify_if_time_to` | If type = `notify_if_time_from_to` | UTC time represents the end of the time period, for example `18:00:00Z`. | **HTTP request** @@ -62,11 +64,11 @@ The above command returns JSON structured in the following way: ```json { - "id": "E3GA6SJETWWJS", - "escalation_chain_id": "F5JU6KJET33FE", - "position": 0, - "type": "wait", - "duration": 60 + "id": "E3GA6SJETWWJS", + "escalation_chain_id": "F5JU6KJET33FE", + "position": 0, + "type": "wait", + "duration": 60 } ``` @@ -80,40 +82,38 @@ The above command returns JSON structured in the following way: curl "{{API_URL}}/api/v1/escalation_policies/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + --header "Content-Type: application/json" ``` The above command returns JSON structured in the following way: ```json { - "count": 2, - "next": null, - "previous": null, - "results": [ - { - "id": "E3GA6SJETWWJS", - "escalation_chain_id": "F5JU6KJET33FE", - "position": 0, - "type": "wait", - "duration": 60 - }, - { - "id": "E5JJTU52M5YM4", - "escalation_chain_id": "F5JU6KJET33FE", - "position": 1, - "type": "notify_person_next_each_time", - "persons_to_notify_next_each_time": [ - "U4DNY931HHJS5" - ] - } - ] + "count": 2, + "next": null, + "previous": null, + "results": [ + { + "id": "E3GA6SJETWWJS", + "escalation_chain_id": "F5JU6KJET33FE", + "position": 0, + "type": "wait", + "duration": 60 + }, + { + "id": "E5JJTU52M5YM4", + "escalation_chain_id": "F5JU6KJET33FE", + "position": 1, + "type": "notify_person_next_each_time", + "persons_to_notify_next_each_time": ["U4DNY931HHJS5"] + } + ] } ``` The following available filter parameter should be provided as a `GET` argument: -* `escalation_chain_id` +- 
`escalation_chain_id` **HTTP request** @@ -130,4 +130,4 @@ curl "{{API_URL}}/api/v1/escalation_policies/E3GA6SJETWWJS/" \ **HTTP request** -`DELETE {{API_URL}}/api/v1/escalation_policies//` \ No newline at end of file +`DELETE {{API_URL}}/api/v1/escalation_policies//` diff --git a/docs/sources/oncall-api-reference/integrations.md b/docs/sources/oncall-api-reference/integrations.md index 4e5ed02b..d6d7e729 100644 --- a/docs/sources/oncall-api-reference/integrations.md +++ b/docs/sources/oncall-api-reference/integrations.md @@ -1,8 +1,10 @@ -+++ -title = "Integrations HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/integrations/"] -weight = 500 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/integrations/ + - /docs/oncall/latest/oncall-api-reference/integrations/ +title: Integrations HTTP API +weight: 500 +--- # Create an integration @@ -20,52 +22,52 @@ The above command returns JSON structured in the following way: ```json { - "id": "CFRPV98RPR1U8", - "name": "Grafana :blush:", - "team_id": null, - "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/", - "type": "grafana", - "default_route": { - "id": "RVBE4RKQSCGJ2", - "escalation_chain_id": "F5JU6KJET33FE", - "slack": { - "channel_id": "CH23212D" - } - }, - "templates": { - "grouping_key": null, - "resolve_signal": null, - "slack": { - "title": null, - "message": null, - "image_url": null - }, - "web": { - "title": null, - "message": null, - "image_url": null - }, - "email": { - "title": null, - "message": null - }, - "sms": { - "title": null - }, - "phone_call": { - "title": null - }, - "telegram": { - "title": null, - "message": null, - "image_url": null - } + "id": "CFRPV98RPR1U8", + "name": "Grafana :blush:", + "team_id": null, + "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/", + "type": "grafana", + "default_route": { + "id": "RVBE4RKQSCGJ2", + "escalation_chain_id": "F5JU6KJET33FE", + "slack": { + "channel_id": "CH23212D" 
} + }, + "templates": { + "grouping_key": null, + "resolve_signal": null, + "slack": { + "title": null, + "message": null, + "image_url": null + }, + "web": { + "title": null, + "message": null, + "image_url": null + }, + "email": { + "title": null, + "message": null + }, + "sms": { + "title": null + }, + "phone_call": { + "title": null + }, + "telegram": { + "title": null, + "message": null, + "image_url": null + } + } } ``` Integrations are sources of alerts and alert groups for Grafana OnCall. -For example, to learn how to integrate Grafana OnCall with Alertmanager see [Alertmanager]({{< relref "../integrations/add-alertmanager" >}}). +For example, to learn how to integrate Grafana OnCall with Alertmanager see [Alertmanager]({{< relref "../integrations/add-alertmanager" >}}). **HTTP request** @@ -84,51 +86,51 @@ The above command returns JSON structured in the following way: ```json { - "id": "CFRPV98RPR1U8", - "name": "Grafana :blush:", - "team_id": null, - "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/", - "type": "grafana", - "default_route": { - "id": "RVBE4RKQSCGJ2", - "escalation_chain_id": "F5JU6KJET33FE", - "slack": { - "channel_id": "CH23212D" - } - }, - "templates": { - "grouping_key": null, - "resolve_signal": null, - "slack": { - "title": null, - "message": null, - "image_url": null - }, - "web": { - "title": null, - "message": null, - "image_url": null - }, - "email": { - "title": null, - "message": null - }, - "sms": { - "title": null - }, - "phone_call": { - "title": null - }, - "telegram": { - "title": null, - "message": null, - "image_url": null - } + "id": "CFRPV98RPR1U8", + "name": "Grafana :blush:", + "team_id": null, + "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/", + "type": "grafana", + "default_route": { + "id": "RVBE4RKQSCGJ2", + "escalation_chain_id": "F5JU6KJET33FE", + "slack": { + "channel_id": "CH23212D" } + }, + "templates": { + "grouping_key": null, + "resolve_signal": null, + "slack": 
{ + "title": null, + "message": null, + "image_url": null + }, + "web": { + "title": null, + "message": null, + "image_url": null + }, + "email": { + "title": null, + "message": null + }, + "sms": { + "title": null + }, + "phone_call": { + "title": null + }, + "telegram": { + "title": null, + "message": null, + "image_url": null + } + } } ``` -This endpoint retrieves an integration. Integrations are sources of alerts and alert groups for Grafana OnCall. +This endpoint retrieves an integration. Integrations are sources of alerts and alert groups for Grafana OnCall. **HTTP request** @@ -147,54 +149,54 @@ The above command returns JSON structured in the following way: ```json { - "count": 1, - "next": null, - "previous": null, - "results": [ - { - "id": "CFRPV98RPR1U8", - "name": "Grafana :blush:", - "team_id": null, - "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/", - "type": "grafana", - "default_route": { - "id": "RVBE4RKQSCGJ2", - "escalation_chain_id": "F5JU6KJET33FE", - "slack": { - "channel_id": "CH23212D" - } - }, - "templates": { - "grouping_key": null, - "resolve_signal": null, - "slack": { - "title": null, - "message": null, - "image_url": null - }, - "web": { - "title": null, - "message": null, - "image_url": null - }, - "email": { - "title": null, - "message": null - }, - "sms": { - "title": null - }, - "phone_call": { - "title": null - }, - "telegram": { - "title": null, - "message": null, - "image_url": null - } - } + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "id": "CFRPV98RPR1U8", + "name": "Grafana :blush:", + "team_id": null, + "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/", + "type": "grafana", + "default_route": { + "id": "RVBE4RKQSCGJ2", + "escalation_chain_id": "F5JU6KJET33FE", + "slack": { + "channel_id": "CH23212D" } - ] + }, + "templates": { + "grouping_key": null, + "resolve_signal": null, + "slack": { + "title": null, + "message": null, + "image_url": null + }, + 
"web": { + "title": null, + "message": null, + "image_url": null + }, + "email": { + "title": null, + "message": null + }, + "sms": { + "title": null + }, + "phone_call": { + "title": null + }, + "telegram": { + "title": null, + "message": null, + "image_url": null + } + } + } + ] } ``` @@ -226,47 +228,47 @@ The above command returns JSON structured in the following way: ```json { - "id": "CFRPV98RPR1U8", - "name": "Grafana :blush:", - "team_id": null, - "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/", - "type": "grafana", - "default_route": { - "id": "RVBE4RKQSCGJ2", - "escalation_chain_id": "F5JU6KJET33FE", - "slack": { - "channel_id": "CH23212D" - } - }, - "templates": { - "grouping_key": null, - "resolve_signal": null, - "slack": { - "title": null, - "message": null, - "image_url": null - }, - "web": { - "title": null, - "message": null, - "image_url": null - }, - "email": { - "title": null, - "message": null - }, - "sms": { - "title": null - }, - "phone_call": { - "title": null - }, - "telegram": { - "title": null, - "message": null, - "image_url": null - } + "id": "CFRPV98RPR1U8", + "name": "Grafana :blush:", + "team_id": null, + "link": "{{API_URL}}/integrations/v1/grafana/mReAoNwDm0eMwKo1mTeTwYo/", + "type": "grafana", + "default_route": { + "id": "RVBE4RKQSCGJ2", + "escalation_chain_id": "F5JU6KJET33FE", + "slack": { + "channel_id": "CH23212D" } + }, + "templates": { + "grouping_key": null, + "resolve_signal": null, + "slack": { + "title": null, + "message": null, + "image_url": null + }, + "web": { + "title": null, + "message": null, + "image_url": null + }, + "email": { + "title": null, + "message": null + }, + "sms": { + "title": null + }, + "phone_call": { + "title": null + }, + "telegram": { + "title": null, + "message": null, + "image_url": null + } + } } ``` @@ -275,6 +277,7 @@ The above command returns JSON structured in the following way: `PUT {{API_URL}}/api/v1/integrations//` # Delete integration + Deleted integrations 
will stop recording new alerts from monitoring. Integration removal won't trigger removal of related alert groups or alerts. ```shell diff --git a/docs/sources/oncall-api-reference/on_call_shifts.md b/docs/sources/oncall-api-reference/on_call_shifts.md index fc78a9ed..c3f3a5d0 100644 --- a/docs/sources/oncall-api-reference/on_call_shifts.md +++ b/docs/sources/oncall-api-reference/on_call_shifts.md @@ -1,8 +1,10 @@ -+++ -title = "OnCall shifts HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/on_call_shifts/"] -weight = 600 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/on_call_shifts/ + - /docs/oncall/latest/oncall-api-reference/on_call_shifts/ +title: OnCall shifts HTTP API +weight: 600 +--- # Create an OnCall shift @@ -29,38 +31,36 @@ The above command returns JSON structured in the following way: ```json { - "id": "OH3V5FYQEYJ6M", - "name": "Demo single event", - "type": "single_event", - "team_id": null, - "time_zone": null, - "level": 0, - "start": "2020-09-10T08:00:00", - "duration": 10800, - "users": [ - "U4DNY931HHJS5" - ] + "id": "OH3V5FYQEYJ6M", + "name": "Demo single event", + "type": "single_event", + "team_id": null, + "time_zone": null, + "level": 0, + "start": "2020-09-10T08:00:00", + "duration": 10800, + "users": ["U4DNY931HHJS5"] } ``` -| Parameter | Unique | Required | Description | -|-----------|:------:|:--------:|:------------| -`name` | Yes | Yes | On-call shift name. -`type` | No | Yes | One of: `single_event`, `recurrent_event`, `rolling_users`. -`team_id` | No | ID of the team. -`time_zone` | No | Optional | On-call shift time zone. Default is local schedule time zone. **This field will override the schedule time zone if changed**. For more information see [time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). -`level` | No | Optional | Priority level. The higher the value, the higher the priority. 
If two events overlap in one schedule, Grafana OnCall will choose the event with higher level. For example: Alex is on-call from 8AM till 11AM with level 1, Bob is on-call from 9AM till 11AM with level 2. At 10AM Grafana OnCall will notify Bob. At 8AM OnCall will notify Alex. -`start` | No | Yes | Start time of the on-call shift. This parameter takes a date format as `yyyy-MM-dd'T'HH:mm:ss` (for example "2020-09-05T08:00:00"). -`duration` | No | Yes | Duration of the event. -`frequency` | No | If type = `recurrent_event` or `rolling_users` | One of: `daily`, `weekly`, `monthly`. -`interval` | No | Optional | This parameter takes a positive integer that represents the intervals that the recurrence rule repeats. -`week_start` | No | Optional | Start day of the week in iCal format. One of: `SU` (Sunday), `MO` (Monday), `TU` (Tuesday), `WE` (Wednesday), `TH` (Thursday), `FR` (Friday), `SA` (Saturday). Default: `SU`. -`by_day` | No | Optional | List of days in iCal format. Valid values are: `SU`, `MO`, `TU`, `WE`, `TH`, `FR`, `SA`. -`by_month` | No | Optional | List of months. Valid values are `1` to `12`. -`by_monthday` | No | Optional | List of days of the month. Valid values are `1` to `31` or `-31` to `-1`. -`users` | No | Optional | List of on-call users. -`rolling_users` | No | Optional | List of lists with on-call users (for `rolling_users` event type). Grafana OnCall will iterate over lists of users for every time frame specified in `frequency`. For example: there are two lists of users in `rolling_users` : [[Alex, Bob], [Alice]] and `frequency` = `daily` . This means that the first day Alex and Bob will be notified. The next day: Alice. The day after: Alex and Bob again and so on. -`start_rotation_from_user_index` | No | Optional | Index of the list of users in `rolling_users`, from which on-call rotation starts. 
By default, the start index is `0` +| Parameter | Unique | Required | Description | +| -------------------------------- | :----: | :--------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Yes | Yes | On-call shift name. | +| `type` | No | Yes | One of: `single_event`, `recurrent_event`, `rolling_users`. | +| `team_id` | No | ID of the team. | +| `time_zone` | No | Optional | On-call shift time zone. Default is local schedule time zone. **This field will override the schedule time zone if changed**. For more information see [time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). | +| `level` | No | Optional | Priority level. The higher the value, the higher the priority. If two events overlap in one schedule, Grafana OnCall will choose the event with higher level. For example: Alex is on-call from 8AM till 11AM with level 1, Bob is on-call from 9AM till 11AM with level 2. At 10AM Grafana OnCall will notify Bob. At 8AM OnCall will notify Alex. | +| `start` | No | Yes | Start time of the on-call shift. This parameter takes a date format as `yyyy-MM-dd'T'HH:mm:ss` (for example "2020-09-05T08:00:00"). | +| `duration` | No | Yes | Duration of the event. | +| `frequency` | No | If type = `recurrent_event` or `rolling_users` | One of: `daily`, `weekly`, `monthly`. | +| `interval` | No | Optional | This parameter takes a positive integer that represents the intervals that the recurrence rule repeats. | +| `week_start` | No | Optional | Start day of the week in iCal format. 
One of: `SU` (Sunday), `MO` (Monday), `TU` (Tuesday), `WE` (Wednesday), `TH` (Thursday), `FR` (Friday), `SA` (Saturday). Default: `SU`. | +| `by_day` | No | Optional | List of days in iCal format. Valid values are: `SU`, `MO`, `TU`, `WE`, `TH`, `FR`, `SA`. | +| `by_month` | No | Optional | List of months. Valid values are `1` to `12`. | +| `by_monthday` | No | Optional | List of days of the month. Valid values are `1` to `31` or `-31` to `-1`. | +| `users` | No | Optional | List of on-call users. | +| `rolling_users` | No | Optional | List of lists with on-call users (for `rolling_users` event type). Grafana OnCall will iterate over lists of users for every time frame specified in `frequency`. For example: there are two lists of users in `rolling_users` : [[Alex, Bob], [Alice]] and `frequency` = `daily` . This means that the first day Alex and Bob will be notified. The next day: Alice. The day after: Alex and Bob again and so on. | +| `start_rotation_from_user_index` | No | Optional | Index of the list of users in `rolling_users`, from which on-call rotation starts. By default, the start index is `0` | Please see [RFC 5545](https://tools.ietf.org/html/rfc5545#section-3.3.10) for more information about recurrence rules. 
@@ -81,17 +81,15 @@ The above command returns JSON structured in the following way: ```json { - "id": "OH3V5FYQEYJ6M", - "name": "Demo single event", - "type": "single_event", - "team_id": null, - "time_zone": null, - "level": 0, - "start": "2020-09-10T08:00:00", - "duration": 10800, - "users": [ - "U4DNY931HHJS5" - ] + "id": "OH3V5FYQEYJ6M", + "name": "Demo single event", + "type": "single_event", + "team_id": null, + "time_zone": null, + "level": 0, + "start": "2020-09-10T08:00:00", + "duration": 10800, + "users": ["U4DNY931HHJS5"] } ``` @@ -105,61 +103,53 @@ The above command returns JSON structured in the following way: curl "{{API_URL}}/api/v1/on_call_shifts/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + --header "Content-Type: application/json" ``` The above command returns JSON structured in the following way: ```json { - "count": 2, - "next": null, - "previous": null, - "results": [ - { - "id": "OH3V5FYQEYJ6M", - "name": "Demo single event", - "type": "single_event", - "team_id": null, - "time_zone": null, - "level": 0, - "start": "2020-09-10T08:00:00", - "duration": 10800, - "users": [ - "U4DNY931HHJS5" - ] - }, - { - "id": "O9WTH7CKM3KZW", - "name": "Demo recurrent event", - "type": "recurrent_event", - "team_id": null, - "time_zone": null, - "level": 0, - "start": "2020-09-10T16:00:00", - "duration": 10800, - "frequency": "weekly", - "interval": 2, - "week_start": "SU", - "by_day": [ - "MO", - "WE", - "FR" - ], - "by_month": null, - "by_monthday": null, - "users": [ - "U4DNY931HHJS5" - ] - } - ] + "count": 2, + "next": null, + "previous": null, + "results": [ + { + "id": "OH3V5FYQEYJ6M", + "name": "Demo single event", + "type": "single_event", + "team_id": null, + "time_zone": null, + "level": 0, + "start": "2020-09-10T08:00:00", + "duration": 10800, + "users": ["U4DNY931HHJS5"] + }, + { + "id": "O9WTH7CKM3KZW", + "name": "Demo recurrent event", + "type": "recurrent_event", + "team_id": null, + 
"time_zone": null, + "level": 0, + "start": "2020-09-10T16:00:00", + "duration": 10800, + "frequency": "weekly", + "interval": 2, + "week_start": "SU", + "by_day": ["MO", "WE", "FR"], + "by_month": null, + "by_monthday": null, + "users": ["U4DNY931HHJS5"] + } + ] } ``` The following available filter parameters should be provided as `GET` arguments: -* `name` (Exact match) -* `schedule_id` (Exact match) +- `name` (Exact match) +- `schedule_id` (Exact match) **HTTP request** @@ -188,17 +178,15 @@ The above command returns JSON structured in the following way: ```json { - "id": "OH3V5FYQEYJ6M", - "name": "Demo single event", - "type": "single_event", - "team_id": null, - "time_zone": null, - "level": 0, - "start": "2020-09-10T08:00:00", - "duration": 10800, - "users": [ - "U4DNY931HHJS5" - ] + "id": "OH3V5FYQEYJ6M", + "name": "Demo single event", + "type": "single_event", + "team_id": null, + "time_zone": null, + "level": 0, + "start": "2020-09-10T08:00:00", + "duration": 10800, + "users": ["U4DNY931HHJS5"] } ``` @@ -217,4 +205,4 @@ curl "{{API_URL}}/api/v1/on_call_shifts/S3Z477AHDXTMF/" \ **HTTP request** -`DELETE {{API_URL}}/api/v1/on_call_shifts//` \ No newline at end of file +`DELETE {{API_URL}}/api/v1/on_call_shifts//` diff --git a/docs/sources/oncall-api-reference/outgoing_webhooks.md b/docs/sources/oncall-api-reference/outgoing_webhooks.md index 5e7e399f..f42d1c07 100644 --- a/docs/sources/oncall-api-reference/outgoing_webhooks.md +++ b/docs/sources/oncall-api-reference/outgoing_webhooks.md @@ -1,8 +1,10 @@ -+++ -title = "Outgoing webhooks HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/outgoing_webhooks/"] -weight = 700 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/outgoing_webhooks/ + - /docs/oncall/latest/oncall-api-reference/outgoing_webhooks/ +title: Outgoing webhooks HTTP API +weight: 700 +--- # Outgoing webhooks (actions) @@ -14,25 +16,25 @@ Used in escalation policies with type `trigger_action`. 
curl "{{API_URL}}/api/v1/actions/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + --header "Content-Type: application/json" ``` The above command returns JSON structured in the following way: ```json { - "count": 1, - "next": null, - "previous": null, - "results": [ - { - "id": "KGEFG74LU1D8L", - "name": "Publish alert group notification to JIRA" - } - ] + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "id": "KGEFG74LU1D8L", + "name": "Publish alert group notification to JIRA" + } + ] } ``` **HTTP request** -`GET {{API_URL}}/api/v1/actions/` \ No newline at end of file +`GET {{API_URL}}/api/v1/actions/` diff --git a/docs/sources/oncall-api-reference/personal_notification_rules.md b/docs/sources/oncall-api-reference/personal_notification_rules.md index dca60f79..2b22ec07 100644 --- a/docs/sources/oncall-api-reference/personal_notification_rules.md +++ b/docs/sources/oncall-api-reference/personal_notification_rules.md @@ -1,8 +1,10 @@ -+++ -title = "Personal Notification Rules HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/personal_notification_rules/"] -weight = 800 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/personal_notification_rules/ + - /docs/oncall/latest/oncall-api-reference/personal_notification_rules/ +title: Personal Notification Rules HTTP API +weight: 800 +--- # Post a personal notification rule @@ -21,21 +23,21 @@ The above command returns JSON structured in the following way: ```json { - "id": "NT79GA9I7E4DJ", - "user_id": "U4DNY931HHJS5", - "position": 0, - "important": false, - "type": "notify_by_sms" + "id": "NT79GA9I7E4DJ", + "user_id": "U4DNY931HHJS5", + "position": 0, + "important": false, + "type": "notify_by_sms" } ``` -| Parameter | Required | Description | -|-----------|:--------:|:------------| -`user_id` | Yes | User ID -`position` | Optional | Personal notification rules execute one after another starting from 
`position=0`. `Position=-1` will put the escalation policy to the end of the list. A new escalation policy created with a position of an existing escalation policy will move the old one (and all following) down on the list. -`type` | Yes | One of: `wait`, `notify_by_slack`, `notify_by_sms`, `notify_by_phone_call`, `notify_by_telegram`, `notify_by_email`. -`duration` | Optional | A time in secs when type `wait` is chosen for `type`. -`important` | Optional | Boolean value indicates if a rule is "important". Default is `false`. +| Parameter | Required | Description | +| ----------- | :------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `user_id` | Yes | User ID | +| `position` | Optional | Personal notification rules execute one after another starting from `position=0`. `Position=-1` will put the escalation policy to the end of the list. A new escalation policy created with a position of an existing escalation policy will move the old one (and all following) down on the list. | +| `type` | Yes | One of: `wait`, `notify_by_slack`, `notify_by_sms`, `notify_by_phone_call`, `notify_by_telegram`, `notify_by_email`. | +| `duration` | Optional | A time in secs when type `wait` is chosen for `type`. | +| `important` | Optional | Boolean value indicates if a rule is "important". Default is `false`. 
| **HTTP request** @@ -47,19 +49,19 @@ The above command returns JSON structured in the following way: curl "{{API_URL}}/api/v1/personal_notification_rules/ND9EHN5LN1DUU/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + --header "Content-Type: application/json" ``` The above command returns JSON structured in the following way: ```json { - "id": "ND9EHN5LN1DUU", - "user_id": "U4DNY931HHJS5", - "position": 1, - "duration": 300, - "important": false, - "type": "wait" + "id": "ND9EHN5LN1DUU", + "user_id": "U4DNY931HHJS5", + "position": 1, + "duration": 300, + "important": false, + "type": "wait" } ``` @@ -67,61 +69,60 @@ The above command returns JSON structured in the following way: `GET {{API_URL}}/api/v1/personal_notification_rules//` - # List personal notification rules ```shell curl "{{API_URL}}/api/v1/personal_notification_rules/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + --header "Content-Type: application/json" ``` The above command returns JSON structured in the following ways: ```json { - "count": 4, - "next": null, - "previous": null, - "results": [ - { - "id": "NT79GA9I7E4DJ", - "user_id": "U4DNY931HHJS5", - "position": 0, - "important": false, - "type": "notify_by_sms" - }, - { - "id": "ND9EHN5LN1DUU", - "user_id": "U4DNY931HHJS5", - "position": 1, - "duration": 300, - "important": false, - "type": "wait" - }, - { - "id": "NEF49YQ1HNPDD", - "user_id": "U4DNY931HHJS5", - "position": 2, - "important": false, - "type": "notify_by_phone_call" - }, - { - "id": "NWAL6WFJNWDD8", - "user_id": "U4DNY931HHJS5", - "position": 0, - "important": true, - "type": "notify_by_phone_call" - } - ] + "count": 4, + "next": null, + "previous": null, + "results": [ + { + "id": "NT79GA9I7E4DJ", + "user_id": "U4DNY931HHJS5", + "position": 0, + "important": false, + "type": "notify_by_sms" + }, + { + "id": "ND9EHN5LN1DUU", + "user_id": "U4DNY931HHJS5", + 
"position": 1, + "duration": 300, + "important": false, + "type": "wait" + }, + { + "id": "NEF49YQ1HNPDD", + "user_id": "U4DNY931HHJS5", + "position": 2, + "important": false, + "type": "notify_by_phone_call" + }, + { + "id": "NWAL6WFJNWDD8", + "user_id": "U4DNY931HHJS5", + "position": 0, + "important": true, + "type": "notify_by_phone_call" + } + ] } ``` The following available filter parameters should be provided as `GET` arguments: -* `user_id` -* `important` +- `user_id` +- `important` **HTTP Request** @@ -129,7 +130,6 @@ The following available filter parameters should be provided as `GET` arguments: # Delete a personal notification rule - ```shell curl "{{API_URL}}/api/v1/personal_notification_rules/NWAL6WFJNWDD8/" \ --request DELETE \ @@ -139,4 +139,4 @@ curl "{{API_URL}}/api/v1/personal_notification_rules/NWAL6WFJNWDD8/" \ **HTTP request** -`DELETE {{API_URL}}/api/v1/personal_notification_rules//` \ No newline at end of file +`DELETE {{API_URL}}/api/v1/personal_notification_rules//` diff --git a/docs/sources/oncall-api-reference/postmortem_messages.md b/docs/sources/oncall-api-reference/postmortem_messages.md index d156fc71..ba3e9252 100644 --- a/docs/sources/oncall-api-reference/postmortem_messages.md +++ b/docs/sources/oncall-api-reference/postmortem_messages.md @@ -1,9 +1,11 @@ -+++ -title = "Postmortem Messages HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/postmortem_messages/"] -weight = 900 -draft = true -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/postmortem_messages/ + - /docs/oncall/latest/oncall-api-reference/postmortem_messages/ +draft: true +title: Postmortem Messages HTTP API +weight: 900 +--- # Create a postmortem message @@ -22,14 +24,14 @@ The above command returns JSON structured in the following way: ```json { - "id": "M4BTQUS3PRHYQ", - "alert_group_id": "I68T24C13IFW1", - "author": "U4DNY931HHJS5", - "source": "web", - "created_at": "2020-06-19T12:40:01.429805Z", - "text": "Demo 
postmortem message" + "id": "M4BTQUS3PRHYQ", + "alert_group_id": "I68T24C13IFW1", + "author": "U4DNY931HHJS5", + "source": "web", + "created_at": "2020-06-19T12:40:01.429805Z", + "text": "Demo postmortem message" } -``` +``` **HTTP request** @@ -48,12 +50,12 @@ The above command returns JSON structured in the following way: ```json { - "id": "M4BTQUS3PRHYQ", - "alert_group_id": "I68T24C13IFW1", - "author": "U4DNY931HHJS5", - "source": "web", - "created_at": "2020-06-19T12:40:01.429805Z", - "text": "Demo postmortem message" + "id": "M4BTQUS3PRHYQ", + "alert_group_id": "I68T24C13IFW1", + "author": "U4DNY931HHJS5", + "source": "web", + "created_at": "2020-06-19T12:40:01.429805Z", + "text": "Demo postmortem message" } ``` @@ -74,26 +76,25 @@ The above command returns JSON structured in the following way: ```json { - "count": 1, - "next": null, - "previous": null, - "results": [ - { - "id": "M4BTQUS3PRHYQ", - "alert_group_id": "I68T24C13IFW1", - "author": "U4DNY931HHJS5", - "source": "web", - "created_at": "2020-06-19T12:40:01.429805Z", - "text": "Demo postmortem message" - } - ] + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "id": "M4BTQUS3PRHYQ", + "alert_group_id": "I68T24C13IFW1", + "author": "U4DNY931HHJS5", + "source": "web", + "created_at": "2020-06-19T12:40:01.429805Z", + "text": "Demo postmortem message" + } + ] } ``` The following available filter parameter should be provided as a `GET` argument: -* `alert_group_id` - +- `alert_group_id` **HTTP request** @@ -115,12 +116,12 @@ The above command returns JSON structured in the following way: ```json { - "id": "M4BTQUS3PRHYQ", - "alert_group_id": "I68T24C13IFW1", - "author": "U4DNY931HHJS5", - "source": "web", - "created_at": "2020-06-19T12:40:01.429805Z", - "text": "Demo postmortem message" + "id": "M4BTQUS3PRHYQ", + "alert_group_id": "I68T24C13IFW1", + "author": "U4DNY931HHJS5", + "source": "web", + "created_at": "2020-06-19T12:40:01.429805Z", + "text": "Demo postmortem message" } ``` @@ 
-138,4 +139,4 @@ curl "{{API_URL}}/api/v1/postmortem_messages/M4BTQUS3PRHYQ/" \ **HTTP request** -`DELETE {{API_URL}}/api/v1/postmortem_messages//` \ No newline at end of file +`DELETE {{API_URL}}/api/v1/postmortem_messages//` diff --git a/docs/sources/oncall-api-reference/postmortems.md b/docs/sources/oncall-api-reference/postmortems.md index df8580f5..95197687 100644 --- a/docs/sources/oncall-api-reference/postmortems.md +++ b/docs/sources/oncall-api-reference/postmortems.md @@ -1,9 +1,11 @@ -+++ -title = "Postmortem HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/postmortems/"] -weight = 1000 -draft = true -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/postmortems/ + - /docs/oncall/latest/oncall-api-reference/postmortems/ +draft: true +title: Postmortem HTTP API +weight: 1000 +--- # Create a postmortem @@ -22,12 +24,12 @@ The above command returns JSON structured in the following way: ```json { - "id": "P658FE5K87EWZ", - "alert_group_id": "I68T24C13IFW1", - "created_at": "2020-06-19T12:37:01.430444Z", - "text": "Demo postmortem text" + "id": "P658FE5K87EWZ", + "alert_group_id": "I68T24C13IFW1", + "created_at": "2020-06-19T12:37:01.430444Z", + "text": "Demo postmortem text" } -``` +``` **HTTP request** @@ -46,20 +48,20 @@ The above command returns JSON structured in the following way: ```json { - "id": "P658FE5K87EWZ", - "alert_group_id": "I68T24C13IFW1", - "created_at": "2020-06-19T12:37:01.430444Z", - "text": "Demo postmortem text", - "postmortem_messages": [ - { - "id": "M4BTQUS3PRHYQ", - "alert_group_id": "I68T24C13IFW1", - "author": "U4DNY931HHJS5", - "source": "web", - "created_at": "2020-06-19T12:40:01.429805Z", - "text": "Demo postmortem message" - } - ] + "id": "P658FE5K87EWZ", + "alert_group_id": "I68T24C13IFW1", + "created_at": "2020-06-19T12:37:01.430444Z", + "text": "Demo postmortem text", + "postmortem_messages": [ + { + "id": "M4BTQUS3PRHYQ", + "alert_group_id": "I68T24C13IFW1", + "author": 
"U4DNY931HHJS5", + "source": "web", + "created_at": "2020-06-19T12:40:01.429805Z", + "text": "Demo postmortem message" + } + ] } ``` @@ -80,33 +82,33 @@ The above command returns JSON structured in the following way: ```json { - "count": 1, - "next": null, - "previous": null, - "results": [ + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "id": "P658FE5K87EWZ", + "alert_group_id": "I68T24C13IFW1", + "created_at": "2020-06-19T12:37:01.430444Z", + "text": "Demo postmortem text", + "postmortem_messages": [ { - "id": "P658FE5K87EWZ", - "alert_group_id": "I68T24C13IFW1", - "created_at": "2020-06-19T12:37:01.430444Z", - "text": "Demo postmortem text", - "postmortem_messages": [ - { - "id": "M4BTQUS3PRHYQ", - "alert_group_id": "I68T24C13IFW1", - "author": "U4DNY931HHJS5", - "source": "web", - "created_at": "2020-06-19T12:40:01.429805Z", - "text": "Demo postmortem message" - } - ] + "id": "M4BTQUS3PRHYQ", + "alert_group_id": "I68T24C13IFW1", + "author": "U4DNY931HHJS5", + "source": "web", + "created_at": "2020-06-19T12:40:01.429805Z", + "text": "Demo postmortem message" } - ] + ] + } + ] } ``` The following available filter parameter should be provided with a `GET` argument: -* `alert_group_id` +- `alert_group_id` **HTTP request** @@ -128,10 +130,10 @@ The above command returns JSON structured in the following way: ```json { - "id": "P658FE5K87EWZ", - "alert_group_id": "I68T24C13IFW1", - "created_at": "2020-06-19T12:37:01.430444Z", - "text": "Demo postmortem text" + "id": "P658FE5K87EWZ", + "alert_group_id": "I68T24C13IFW1", + "created_at": "2020-06-19T12:37:01.430444Z", + "text": "Demo postmortem text" } ``` @@ -149,4 +151,4 @@ curl "{{API_URL}}/api/v1/postmortems/P658FE5K87EWZ/" \ **HTTP request** -`DELETE {{API_URL}}/api/v1/postmortems//` \ No newline at end of file +`DELETE {{API_URL}}/api/v1/postmortems//` diff --git a/docs/sources/oncall-api-reference/routes.md b/docs/sources/oncall-api-reference/routes.md index e75cb388..11a1a460 100644 --- 
a/docs/sources/oncall-api-reference/routes.md +++ b/docs/sources/oncall-api-reference/routes.md @@ -1,8 +1,10 @@ -+++ -title = "Routes HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/routes/"] -weight = 1100 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/routes/ + - /docs/oncall/latest/oncall-api-reference/routes/ +title: Routes HTTP API +weight: 1100 +--- # Create a route @@ -26,31 +28,31 @@ The above command returns JSON structured in the following way: ```json { - "id": "RIYGUJXCPFHXY", - "integration_id": "CFRPV98RPR1U8", - "escalation_chain_id": "F5JU6KJET33FE", - "routing_regex": "us-(east|west)", - "position": 0, - "is_the_last_route": false, - "slack": { - "channel_id": "CH23212D" - } + "id": "RIYGUJXCPFHXY", + "integration_id": "CFRPV98RPR1U8", + "escalation_chain_id": "F5JU6KJET33FE", + "routing_regex": "us-(east|west)", + "position": 0, + "is_the_last_route": false, + "slack": { + "channel_id": "CH23212D" + } } ``` Routes allow you to direct different alerts to different messenger channels and escalation chains. Useful for: -* Important/non-important alerts -* Alerts for different engineering groups -* Snoozing spam & debugging alerts +- Important/non-important alerts +- Alerts for different engineering groups +- Snoozing spam & debugging alerts -| Parameter | Unique | Required | Description | -|-----------|:------:|:--------:|:------------| -`integration_id` | No | Yes | Each route is assigned to a specific integration. -`escalation_chain_id` | No | Yes | Each route is assigned a specific escalation chain. -`routing_regex` | Yes | Yes | Python Regex query (use https://regex101.com/ for debugging). OnCall chooses the route for an alert in case there is a match inside the whole alert payload. -`position` | Yes | Optional | Route matching is performed one after another starting from position=`0`. Position=`-1` will put the route to the end of the list before `is_the_last_route`. 
A new route created with a position of an existing route will move the old route (and all following routes) down in the list. -`slack` | Yes | Optional | Dictionary with Slack-specific settings for a route. +| Parameter | Unique | Required | Description | +| --------------------- | :----: | :------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `integration_id` | No | Yes | Each route is assigned to a specific integration. | +| `escalation_chain_id` | No | Yes | Each route is assigned a specific escalation chain. | +| `routing_regex` | Yes | Yes | Python Regex query (use https://regex101.com/ for debugging). OnCall chooses the route for an alert in case there is a match inside the whole alert payload. | +| `position` | Yes | Optional | Route matching is performed one after another starting from position=`0`. Position=`-1` will put the route to the end of the list before `is_the_last_route`. A new route created with a position of an existing route will move the old route (and all following routes) down in the list. | +| `slack` | Yes | Optional | Dictionary with Slack-specific settings for a route. 
| **HTTP request** @@ -69,15 +71,15 @@ The above command returns JSON structured in the following way: ```json { - "id": "RIYGUJXCPFHXY", - "integration_id": "CFRPV98RPR1U8", - "escalation_chain_id": "F5JU6KJET33FE", - "routing_regex": "us-(east|west)", - "position": 0, - "is_the_last_route": false, - "slack": { - "channel_id": "CH23212D" - } + "id": "RIYGUJXCPFHXY", + "integration_id": "CFRPV98RPR1U8", + "escalation_chain_id": "F5JU6KJET33FE", + "routing_regex": "us-(east|west)", + "position": 0, + "is_the_last_route": false, + "slack": { + "channel_id": "CH23212D" + } } ``` @@ -85,7 +87,6 @@ The above command returns JSON structured in the following way: `GET {{API_URL}}/api/v1/routes//` - # List routes ```shell @@ -99,40 +100,40 @@ The above command returns JSON structured in the following way: ```json { - "count": 2, - "next": null, - "previous": null, - "results": [ - { - "id": "RIYGUJXCPFHXY", - "integration_id": "CFRPV98RPR1U8", - "escalation_chain_id": "F5JU6KJET33FE", - "routing_regex": "us-(east|west)", - "position": 0, - "is_the_last_route": false, - "slack": { - "channel_id": "CH23212D" - } - }, - { - "id": "RVBE4RKQSCGJ2", - "integration_id": "CFRPV98RPR1U8", - "escalation_chain_id": "F5JU6KJET33FE", - "routing_regex": ".*", - "position": 1, - "is_the_last_route": true, - "slack": { - "channel_id": "CH23212D" - } - } - ] + "count": 2, + "next": null, + "previous": null, + "results": [ + { + "id": "RIYGUJXCPFHXY", + "integration_id": "CFRPV98RPR1U8", + "escalation_chain_id": "F5JU6KJET33FE", + "routing_regex": "us-(east|west)", + "position": 0, + "is_the_last_route": false, + "slack": { + "channel_id": "CH23212D" + } + }, + { + "id": "RVBE4RKQSCGJ2", + "integration_id": "CFRPV98RPR1U8", + "escalation_chain_id": "F5JU6KJET33FE", + "routing_regex": ".*", + "position": 1, + "is_the_last_route": true, + "slack": { + "channel_id": "CH23212D" + } + } + ] } ``` The following available filter parameters should be provided as `GET` arguments: -* `integration_id` 
-* `routing_regex` (Exact match) +- `integration_id` +- `routing_regex` (Exact match) **HTTP request** @@ -158,15 +159,15 @@ The above command returns JSON structured in the following way: ```json { - "id": "RIYGUJXCPFHXY", - "integration_id": "CFRPV98RPR1U8", - "escalation_chain_id": "F5JU6KJET33FE", - "routing_regex": "us-(east|west)", - "position": 0, - "is_the_last_route": false, - "slack": { - "channel_id": "CH23212D" - } + "id": "RIYGUJXCPFHXY", + "integration_id": "CFRPV98RPR1U8", + "escalation_chain_id": "F5JU6KJET33FE", + "routing_regex": "us-(east|west)", + "position": 0, + "is_the_last_route": false, + "slack": { + "channel_id": "CH23212D" + } } ``` @@ -185,4 +186,4 @@ curl "{{API_URL}}/api/v1/routes/RIYGUJXCPFHXY/" \ **HTTP request** -`DELETE {{API_URL}}/api/v1/routes//` \ No newline at end of file +`DELETE {{API_URL}}/api/v1/routes//` diff --git a/docs/sources/oncall-api-reference/schedules.md b/docs/sources/oncall-api-reference/schedules.md index 22b41b3b..f8de9cb9 100644 --- a/docs/sources/oncall-api-reference/schedules.md +++ b/docs/sources/oncall-api-reference/schedules.md @@ -1,8 +1,10 @@ -+++ -title = "Schedule HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/schedules/"] -weight = 1200 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/schedules/ + - /docs/oncall/latest/oncall-api-reference/schedules/ +title: Schedule HTTP API +weight: 1200 +--- # Create a schedule @@ -25,32 +27,30 @@ The above command returns JSON structured in the following way: ```json { - "id": "SBM7DV7BKFUYU", - "name": "Demo schedule iCal", - "type": "ical", - "team_id": null, - "ical_url_primary": "https://example.com/meow_calendar.ics", - "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics", - "on_call_now": [ - "U4DNY931HHJS5" - ], - "slack": { - "channel_id": "MEOW_SLACK_ID", - "user_group_id": "MEOW_SLACK_ID" - } + "id": "SBM7DV7BKFUYU", + "name": "Demo schedule iCal", + "type": "ical", + "team_id": null, + 
"ical_url_primary": "https://example.com/meow_calendar.ics", + "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics", + "on_call_now": ["U4DNY931HHJS5"], + "slack": { + "channel_id": "MEOW_SLACK_ID", + "user_group_id": "MEOW_SLACK_ID" + } } ``` -| Parameter | Unique | Required | Description | -|-----------|:------:|:--------:|:------------| -`name` | Yes | Yes | Schedule name. -`type` | No | Yes | Schedule type. May be `ical` (used for iCalendar integration) or `calendar` (used for manually created on-call shifts). -`team_id` | No | No | ID of the team. -`time_zone` | No | Optional | Schedule time zone. Is used for manually added on-call shifts in Schedules with type `calendar`. Default time zone is `UTC`. For more information about time zones, see [time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). -`ical_url_primary` | No | If type = `ical` | URL of external iCal calendar for schedule with type `ical`. -`ical_url_overrides` | No | Optional | URL of external iCal calendar for schedule with any type. Events from this calendar override events from primary calendar or from on-call shifts. -`slack` | No | Optional | Dictionary with Slack-specific settings for a schedule. Includes `channel_id` and `user_group_id` fields, that take a channel ID and a user group ID from Slack. -`shifts` | No | Optional | List of shifts. Used for manually added on-call shifts in Schedules with type `calendar`. +| Parameter | Unique | Required | Description | +| -------------------- | :----: | :--------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Yes | Yes | Schedule name. | +| `type` | No | Yes | Schedule type. May be `ical` (used for iCalendar integration) or `calendar` (used for manually created on-call shifts). 
| +| `team_id` | No | No | ID of the team. | +| `time_zone` | No | Optional | Schedule time zone. Is used for manually added on-call shifts in Schedules with type `calendar`. Default time zone is `UTC`. For more information about time zones, see [time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). | +| `ical_url_primary` | No | If type = `ical` | URL of external iCal calendar for schedule with type `ical`. | +| `ical_url_overrides` | No | Optional | URL of external iCal calendar for schedule with any type. Events from this calendar override events from primary calendar or from on-call shifts. | +| `slack` | No | Optional | Dictionary with Slack-specific settings for a schedule. Includes `channel_id` and `user_group_id` fields, that take a channel ID and a user group ID from Slack. | +| `shifts` | No | Optional | List of shifts. Used for manually added on-call shifts in Schedules with type `calendar`. | **HTTP request** @@ -69,19 +69,17 @@ The above command returns JSON structured in the following way: ```json { - "id": "SBM7DV7BKFUYU", - "name": "Demo schedule iCal", - "type": "ical", - "team_id": null, - "ical_url_primary": "https://example.com/meow_calendar.ics", - "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics", - "on_call_now": [ - "U4DNY931HHJS5" - ], - "slack": { - "channel_id": "MEOW_SLACK_ID", - "user_group_id": "MEOW_SLACK_ID" - } + "id": "SBM7DV7BKFUYU", + "name": "Demo schedule iCal", + "type": "ical", + "team_id": null, + "ical_url_primary": "https://example.com/meow_calendar.ics", + "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics", + "on_call_now": ["U4DNY931HHJS5"], + "slack": { + "channel_id": "MEOW_SLACK_ID", + "user_group_id": "MEOW_SLACK_ID" + } } ``` @@ -95,58 +93,51 @@ The above command returns JSON structured in the following way: curl "{{API_URL}}/api/v1/schedules/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + 
--header "Content-Type: application/json" ``` The above command returns JSON structured in the following way: ```json { - "count": 2, - "next": null, - "previous": null, - "results": [ - { - "id": "SBM7DV7BKFUYU", - "name": "Demo schedule iCal", - "type": "ical", - "team_id": null, - "ical_url_primary": "https://example.com/meow_calendar.ics", - "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics", - "on_call_now": [ - "U4DNY931HHJS5" - ], - "slack": { - "channel_id": "MEOW_SLACK_ID", - "user_group_id": "MEOW_SLACK_ID" - } - }, - { - "id": "S3Z477AHDXTMF", - "name": "Demo schedule Calendar", - "type": "calendar", - "team_id": null, - "time_zone": "America/New_York", - "on_call_now": [ - "U4DNY931HHJS5" - ], - "shifts": [ - "OH3V5FYQEYJ6M", - "O9WTH7CKM3KZW" - ], - "ical_url_overrides": null, - "slack": { - "channel_id": "MEOW_SLACK_ID", - "user_group_id": "MEOW_SLACK_ID" - } - } - ] + "count": 2, + "next": null, + "previous": null, + "results": [ + { + "id": "SBM7DV7BKFUYU", + "name": "Demo schedule iCal", + "type": "ical", + "team_id": null, + "ical_url_primary": "https://example.com/meow_calendar.ics", + "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics", + "on_call_now": ["U4DNY931HHJS5"], + "slack": { + "channel_id": "MEOW_SLACK_ID", + "user_group_id": "MEOW_SLACK_ID" + } + }, + { + "id": "S3Z477AHDXTMF", + "name": "Demo schedule Calendar", + "type": "calendar", + "team_id": null, + "time_zone": "America/New_York", + "on_call_now": ["U4DNY931HHJS5"], + "shifts": ["OH3V5FYQEYJ6M", "O9WTH7CKM3KZW"], + "ical_url_overrides": null, + "slack": { + "channel_id": "MEOW_SLACK_ID", + "user_group_id": "MEOW_SLACK_ID" + } + } + ] } ``` The following available filter parameter should be provided as a `GET` argument: -* `name` (Exact match) +- `name` (Exact match) **HTTP request** @@ -172,19 +163,17 @@ The above command returns JSON structured in the following way: ```json { - "id": "SBM7DV7BKFUYU", - "name": "Demo schedule iCal", - 
"type": "ical", - "team_id": null, - "ical_url_primary": "https://example.com/meow_calendar.ics", - "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics", - "on_call_now": [ - "U4DNY931HHJS5" - ], - "slack": { - "channel_id": "MEOW_SLACK_ID", - "user_group_id": "MEOW_SLACK_ID" - } + "id": "SBM7DV7BKFUYU", + "name": "Demo schedule iCal", + "type": "ical", + "team_id": null, + "ical_url_primary": "https://example.com/meow_calendar.ics", + "ical_url_overrides": "https://example.com/meow_calendar_overrides.ics", + "on_call_now": ["U4DNY931HHJS5"], + "slack": { + "channel_id": "MEOW_SLACK_ID", + "user_group_id": "MEOW_SLACK_ID" + } } ``` @@ -203,4 +192,4 @@ curl "{{API_URL}}/api/v1/schedules/SBM7DV7BKFUYU/" \ **HTTP request** -`DELETE {{API_URL}}/api/v1/schedules//` \ No newline at end of file +`DELETE {{API_URL}}/api/v1/schedules//` diff --git a/docs/sources/oncall-api-reference/slack_channels.md b/docs/sources/oncall-api-reference/slack_channels.md index 8426cbfc..6dda924b 100644 --- a/docs/sources/oncall-api-reference/slack_channels.md +++ b/docs/sources/oncall-api-reference/slack_channels.md @@ -1,8 +1,10 @@ -+++ -title = "Slack Channels HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/slack_channels/"] -weight = 1300 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/slack_channels/ + - /docs/oncall/latest/oncall-api-reference/slack_channels/ +title: Slack Channels HTTP API +weight: 1300 +--- # List Slack Channels @@ -10,29 +12,29 @@ weight = 1300 curl "{{API_URL}}/api/v1/slack_channels/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + --header "Content-Type: application/json" ``` The above command returns JSON structured in the following way: ```json { - "count": 1, - "next": null, - "previous": null, - "results": [ - { - "name": "meow_channel", - "slack_id": "MEOW_SLACK_ID" - } - ] + "count": 1, + "next": null, + "previous": null, + "results": [ + 
{ + "name": "meow_channel", + "slack_id": "MEOW_SLACK_ID" + } + ] } ``` The following available filter parameter should be provided as a `GET` argument: -* `channel_name` +- `channel_name` **HTTP Request** -`GET {{API_URL}}/api/v1/slack_channels/` \ No newline at end of file +`GET {{API_URL}}/api/v1/slack_channels/` diff --git a/docs/sources/oncall-api-reference/user_groups.md b/docs/sources/oncall-api-reference/user_groups.md index a336b1db..7fdef887 100644 --- a/docs/sources/oncall-api-reference/user_groups.md +++ b/docs/sources/oncall-api-reference/user_groups.md @@ -1,46 +1,49 @@ -+++ -title = "OnCall User Groups HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/user_groups/"] -weight = 1400 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/user_groups/ + - /docs/oncall/latest/oncall-api-reference/user_groups/ +title: OnCall User Groups HTTP API +weight: 1400 +--- + # List user groups ```shell curl "{{API_URL}}/api/v1/user_groups/" \ --request GET \ --header "Authorization: meowmeowmeow" \ - --header "Content-Type: application/json" + --header "Content-Type: application/json" ``` The above command returns JSON structured in the following way: ```json { - "count": 1, - "next": null, - "previous": null, - "results": [ - { - "id": "GPFAPH7J7BKJB", - "type": "slack_based", - "slack": { - "id": "MEOW_SLACK_ID", - "name": "Meow Group", - "handle": "meow_group" - } - } - ] + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "id": "GPFAPH7J7BKJB", + "type": "slack_based", + "slack": { + "id": "MEOW_SLACK_ID", + "name": "Meow Group", + "handle": "meow_group" + } + } + ] } ``` -| Parameter | Unique | Description | -|-----------|:------:|:------------| -`id` | Yes| User Group ID -`type` | No | [Slack-defined user groups](https://slack.com/intl/en-ru/help/articles/212906697-Create-a-user-group) -`slack` | No | Metadata retrieved from Slack. 
+| Parameter | Unique | Description | +| --------- | :----: | :---------------------------------------------------------------------------------------------------- | +| `id` | Yes | User Group ID | +| `type` | No | [Slack-defined user groups](https://slack.com/intl/en-ru/help/articles/212906697-Create-a-user-group) | +| `slack` | No | Metadata retrieved from Slack. | **HTTP request** -`GET {{API_URL}}/api/v1/user_groups/` \ No newline at end of file +`GET {{API_URL}}/api/v1/user_groups/` diff --git a/docs/sources/oncall-api-reference/users.md b/docs/sources/oncall-api-reference/users.md index 9dde2cfb..a64514dc 100644 --- a/docs/sources/oncall-api-reference/users.md +++ b/docs/sources/oncall-api-reference/users.md @@ -1,35 +1,37 @@ -+++ -title = "Grafana OnCall Users HTTP API" -aliases = ["/docs/grafana-cloud/oncall/oncall-api-reference/users/"] -weight = 1500 -+++ +--- +aliases: + - /docs/grafana-cloud/oncall/oncall-api-reference/users/ + - /docs/oncall/latest/oncall-api-reference/users/ +title: Grafana OnCall Users HTTP API +weight: 1500 +--- # Get a user This endpoint retrieves the user object. -```shell +````shell ```shell curl "{{API_URL}}/api/v1/users/current/" \ --request GET \ --header "Authorization: meowmeowmeow" \ --header "Content-Type: application/json" -``` +```` The above command returns JSON structured in the following way: ```json { - "id": "U4DNY931HHJS5", - "email": "public-api-demo-user-1@grafana.com", - "slack": [ - { - "user_id": "UALEXSLACKDJPK", - "team_id": "TALEXSLACKDJPK" - } - ], - "username": "alex", - "role": "admin" + "id": "U4DNY931HHJS5", + "email": "public-api-demo-user-1@grafana.com", + "slack": [ + { + "user_id": "UALEXSLACKDJPK", + "team_id": "TALEXSLACKDJPK" + } + ], + "username": "alex", + "role": "admin" } ``` @@ -37,15 +39,15 @@ The above command returns JSON structured in the following way: `GET {{API_URL}}/api/v1/users//` -Use `{{API_URL}}/api/v1/users/current` to retrieve the current user. 
+Use `{{API_URL}}/api/v1/users/current` to retrieve the current user. -| Parameter | Unique | Description | -|-----------|:------:|:------------| -`id` | Yes/org | User ID -`email` | Yes/org | User e-mail -`slack` | Yes/org | List of user IDs from connected Slack. User linking key is e-mail. -`username` | Yes/org | User username -`role` | No | One of: `user`, `observer`, `admin`. +| Parameter | Unique | Description | +| ---------- | :-----: | :----------------------------------------------------------------- | +| `id` | Yes/org | User ID | +| `email` | Yes/org | User e-mail | +| `slack` | Yes/org | List of user IDs from connected Slack. User linking key is e-mail. | +| `username` | Yes/org | User username | +| `role` | No | One of: `user`, `observer`, `admin`. | # List Users @@ -60,23 +62,23 @@ The above command returns JSON structured in the following way: ```json { - "count": 1, - "next": null, - "previous": null, - "results": [ + "count": 1, + "next": null, + "previous": null, + "results": [ + { + "id": "U4DNY931HHJS5", + "email": "public-api-demo-user-1@grafana.com", + "slack": [ { - "id": "U4DNY931HHJS5", - "email": "public-api-demo-user-1@grafana.com", - "slack": [ - { - "user_id": "UALEXSLACKDJPK", - "team_id": "TALEXSLACKDJPK" - } - ], - "username": "alex", - "role": "admin" + "user_id": "UALEXSLACKDJPK", + "team_id": "TALEXSLACKDJPK" } - ] + ], + "username": "alex", + "role": "admin" + } + ] } ``` @@ -84,8 +86,8 @@ This endpoint retrieves all users. 
The following available filter parameter should be provided as a `GET` argument: -* `username` (Exact match) +- `username` (Exact match) **HTTP request** -`GET {{API_URL}}/api/v1/users/` \ No newline at end of file +`GET {{API_URL}}/api/v1/users/` diff --git a/docs/sources/open-source.md b/docs/sources/open-source.md new file mode 100644 index 00000000..8d77b0d0 --- /dev/null +++ b/docs/sources/open-source.md @@ -0,0 +1,170 @@ +--- +aliases: + - /docs/grafana-cloud/oncall/open-source/ + - /docs/oncall/latest/open-source/ +keywords: + - Open Source +title: Open Source +weight: 100 +--- + +# Open Source + +We prepared three environments for OSS users: +- **Hobby** environment for local usage & playing around: [README.md](https://github.com/grafana/oncall#getting-started). +- **Development** environment for contributors: [DEVELOPER.md](https://github.com/grafana/oncall/blob/dev/DEVELOPER.md) +- **Production** environment for reliable cloud installation using Helm: [Production Environment](#production-environment) + +## Production Environment + +TBD + +## Slack Setup + +Grafana OnCall Slack integration use a lot of Slack API features: +- Subscription on Slack events requires OnCall to be externally available and provide https endpoint. +- You will need to register new Slack App. + +1. Make sure your OnCall is up and running. + +2. You need OnCall to be accessible through https. For development purposes we suggest using [localtunnel](https://github.com/localtunnel/localtunnel). For production purposes please consider setting up proper web server with HTTPS termination. For localtunnel: +```bash +# Choose the unique prefix instead of pretty-turkey-83 +# Localtunnel will generate an url, e.g. https://pretty-turkey-83.loca.lt +# it is referred as below +lt --port 8000 -s pretty-turkey-83 --print-requests +``` + +3. If you use localtunnel, open your external URL and click "Continue" to allow requests to bypass the warning page. + +4. 
[Create a Slack Workspace](https://slack.com/create) for development, or use your company workspace. + +5. Go to https://api.slack.com/apps and click Create New App button + +6. Select `From an app manifest` option and choose the right workspace + +7. Copy and paste the following block with the correct and fields + + ```yaml + _metadata: + major_version: 1 + minor_version: 1 + display_information: + name: + features: + app_home: + home_tab_enabled: true + messages_tab_enabled: true + messages_tab_read_only_enabled: false + bot_user: + display_name: + always_online: true + shortcuts: + - name: Create a new incident + type: message + callback_id: incident_create + description: Creates a new OnCall incident + - name: Add to postmortem + type: message + callback_id: add_postmortem + description: Add this message to postmortem + slash_commands: + - command: /oncall + url: /slack/interactive_api_endpoint/ + description: oncall + should_escape: false + oauth_config: + redirect_urls: + - /api/internal/v1/complete/slack-install-free/ + - /api/internal/v1/complete/slack-login/ + scopes: + user: + - channels:read + - chat:write + - identify + - users.profile:read + bot: + - app_mentions:read + - channels:history + - channels:read + - chat:write + - chat:write.customize + - chat:write.public + - commands + - files:write + - groups:history + - groups:read + - im:history + - im:read + - im:write + - mpim:history + - mpim:read + - mpim:write + - reactions:write + - team:read + - usergroups:read + - usergroups:write + - users.profile:read + - users:read + - users:read.email + - users:write + settings: + event_subscriptions: + request_url: /slack/event_api_endpoint/ + bot_events: + - app_home_opened + - app_mention + - channel_archive + - channel_created + - channel_deleted + - channel_rename + - channel_unarchive + - member_joined_channel + - message.channels + - message.im + - subteam_created + - subteam_members_changed + - subteam_updated + - user_change + interactivity: + 
is_enabled: true + request_url: /slack/interactive_api_endpoint/ + org_deploy_enabled: false + socket_mode_enabled: false + ``` + +6. Go to your "OnCall" -> "Env Variables" and set: + ``` + SLACK_CLIENT_OAUTH_ID = Basic Information -> App Credentials -> Client ID + SLACK_CLIENT_OAUTH_SECRET = Basic Information -> App Credentials -> Client Secret + SLACK_SIGNING_SECRET = Basic Information -> App Credentials -> Signing Secret + SLACK_INSTALL_RETURN_REDIRECT_HOST = << OnCall external URL >> + ``` + +7. Go to "OnCall" -> "ChatOps" -> "Slack" and install Slack Integration + +8. All set! + +## Telegram Setup + +- Telegram integrations requires OnCall to be externally available and provide https endpoint. +- Telegram integration in OnCall is designed for collaborative team work. It requires Telegram Group and a Telegram Channel (private) for alerts. + +1. Make sure your OnCall is up and running. + +2. Respectfully ask [BotFather](https://t.me/BotFather) for a key, put it in `TELEGRAM_TOKEN` in "OnCall" -> "Env Variables". + +3. Set `TELEGRAM_WEBHOOK_HOST` with your external url for OnCall. + +4. Go to "OnCall" -> "ChatOps" -> Telegram and enjoy! + +## Grafana OSS-Cloud Setup + +Grafana OSS could be connected to Grafana Cloud for heartbeat and SMS / Phone Calls. We tried our best in making Grafana OSS <-> Cloud self-explanatory. Check "Cloud" page in your OSS OnCall instance. + +Please note that it's possible either to use Grafana Cloud either Twilio for SMS/Phone calls. + +## Twilio Setup + +1. Make sure Grafana OSS <-> Cloud connector is disabled. Set `GRAFANA_CLOUD_NOTIFICATIONS_ENABLED` as False. +2. 
Check "OnCall" -> "Env Variables" and set all variables starting with `TWILIO_` diff --git a/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_policy_snapshot.py b/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_policy_snapshot.py index 2ee420e7..a082270e 100644 --- a/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_policy_snapshot.py +++ b/engine/apps/alerts/escalation_snapshot/snapshot_classes/escalation_policy_snapshot.py @@ -266,7 +266,7 @@ class EscalationPolicySnapshot: escalation_policy_step=self.step, ) else: - notify_to_users_list = list_users_to_notify_from_ical(on_call_schedule) + notify_to_users_list = list_users_to_notify_from_ical(on_call_schedule, include_viewers=True) if notify_to_users_list is None: log_record = AlertGroupLogRecord( type=AlertGroupLogRecord.TYPE_ESCALATION_FAILED, diff --git a/engine/apps/alerts/grafana_alerting_sync_manager/grafana_alerting_sync.py b/engine/apps/alerts/grafana_alerting_sync_manager/grafana_alerting_sync.py index 7bfcbdef..a9ca08fb 100644 --- a/engine/apps/alerts/grafana_alerting_sync_manager/grafana_alerting_sync.py +++ b/engine/apps/alerts/grafana_alerting_sync_manager/grafana_alerting_sync.py @@ -5,7 +5,7 @@ from typing import Optional from django.apps import apps from rest_framework import status -from apps.alerts.tasks import create_contact_points_for_datasource +from apps.alerts.tasks import schedule_create_contact_points_for_datasource from apps.grafana_plugin.helpers import GrafanaAPIClient logger = logging.getLogger(__name__) @@ -77,16 +77,15 @@ class GrafanaAlertingSyncManager: # sync other datasource for datasource in datasources: if datasource["type"] == GrafanaAlertingSyncManager.ALERTING_DATASOURCE: - if self.create_contact_point(datasource) is None: + contact_point = self.create_contact_point(datasource) + if contact_point is None: # Failed to create contact point duo to getting wrong alerting config. It is expected behaviour. 
# Add datasource to list and retry to create contact point for it async datasources_to_create.append(datasource) if datasources_to_create: # create other contact points async - create_contact_points_for_datasource.apply_async( - (self.alert_receive_channel.pk, datasources_to_create), - ) + schedule_create_contact_points_for_datasource(self.alert_receive_channel.pk, datasources_to_create) else: self.alert_receive_channel.is_finished_alerting_setup = True self.alert_receive_channel.save(update_fields=["is_finished_alerting_setup"]) diff --git a/engine/apps/alerts/incident_appearance/templaters/phone_call_templater.py b/engine/apps/alerts/incident_appearance/templaters/phone_call_templater.py index 6f9997d7..3d0127ca 100644 --- a/engine/apps/alerts/incident_appearance/templaters/phone_call_templater.py +++ b/engine/apps/alerts/incident_appearance/templaters/phone_call_templater.py @@ -1,5 +1,5 @@ from apps.alerts.incident_appearance.templaters.alert_templater import AlertTemplater -from common.utils import clean_markup +from common.utils import clean_markup, escape_for_twilio_phone_call class AlertPhoneCallTemplater(AlertTemplater): @@ -24,8 +24,4 @@ class AlertPhoneCallTemplater(AlertTemplater): return sf.format(data) def _escape(self, data): - # https://www.twilio.com/docs/api/errors/12100 - data = data.replace("&", "&") - data = data.replace(">", ">") - data = data.replace("<", "<") - return data + return escape_for_twilio_phone_call(data) diff --git a/engine/apps/alerts/migrations/0001_squashed_initial.py b/engine/apps/alerts/migrations/0001_squashed_initial.py index 40ec3b57..bc66bc5c 100644 --- a/engine/apps/alerts/migrations/0001_squashed_initial.py +++ b/engine/apps/alerts/migrations/0001_squashed_initial.py @@ -16,6 +16,8 @@ from django.db import migrations, models import django.db.models.deletion import django.db.models.manager +from apps.alerts.integration_options_mixin import IntegrationOptionsMixin + class Migration(migrations.Migration): @@ -132,7 +134,7 
@@ class Migration(migrations.Migration): ('public_primary_key', models.CharField(default=apps.alerts.models.alert_receive_channel.generate_public_primary_key_for_alert_receive_channel, max_length=20, unique=True, validators=[django.core.validators.MinLengthValidator(13)])), ('created_at', models.DateTimeField(auto_now_add=True)), ('deleted_at', models.DateTimeField(blank=True, null=True)), - ('integration', models.CharField(choices=[('alertmanager', 'AlertManager'), ('grafana', 'Grafana'), ('grafana_alerting', 'Grafana Alerting'), ('formatted_webhook', 'Formatted Webhook'), ('webhook', 'Webhook'), ('amazon_sns', 'Amazon SNS'), ('heartbeat', 'Heartbeat'), ('inbound_email', 'Inboubd Email'), ('maintenance', 'Maintenance'), ('manual', 'Manual'), ('slack_channel', 'Slack Channel'), ('stackdriver', 'Stackdriver'), ('curler', 'Curler'), ('datadog', 'Datadog'), ('demo', 'Demo'), ('elastalert', 'Elastalert'), ('fabric', 'Fabric'), ('kapacitor', 'Kapacitor'), ('newrelic', 'New Relic'), ('pagerduty', 'Pagerduty'), ('pingdom', 'Pingdom'), ('prtg', 'PRTG'), ('sentry', 'Sentry'), ('uptimerobot', 'UptimeRobot'), ('zabbix', 'Zabbix')], default='grafana', max_length=100)), + ('integration', models.CharField(choices=IntegrationOptionsMixin.INTEGRATION_CHOICES,default=IntegrationOptionsMixin.DEFAULT_INTEGRATION, max_length=100)), ('allow_source_based_resolving', models.BooleanField(default=True)), ('token', models.CharField(db_index=True, default=apps.alerts.models.alert_receive_channel.random_token_generator, max_length=30)), ('smile_code', models.TextField(default=':slightly_smiling_face:')), diff --git a/engine/apps/alerts/migrations/0003_squashed_create_demo_token_instances.py b/engine/apps/alerts/migrations/0003_squashed_create_demo_token_instances.py deleted file mode 100644 index 5729cbd6..00000000 --- a/engine/apps/alerts/migrations/0003_squashed_create_demo_token_instances.py +++ /dev/null @@ -1,178 +0,0 @@ -# Generated by Django 3.2.5 on 2021-08-04 10:42 - -import sys 
-from django.db import migrations -from django.utils import timezone, dateparse -from apps.alerts.models.alert_receive_channel import number_to_smiles_translator -from apps.public_api import constants as public_api_constants - - -TYPE_SINGLE_EVENT = 0 -TYPE_RECURRENT_EVENT = 1 -FREQUENCY_WEEKLY = 1 -SOURCE_TERRAFORM = 3 -STEP_WAIT = 0 -STEP_NOTIFY_USERS_QUEUE = 12 -SOURCE_WEB = 1 - - -def create_demo_token_instances(apps, schema_editor): - if not (len(sys.argv) > 1 and sys.argv[1] == 'test'): - User = apps.get_model('user_management', 'User') - Organization = apps.get_model('user_management', 'Organization') - AlertReceiveChannel = apps.get_model('alerts', 'AlertReceiveChannel') - EscalationChain = apps.get_model('alerts', 'EscalationChain') - ChannelFilter = apps.get_model('alerts', 'ChannelFilter') - EscalationPolicy = apps.get_model('alerts', 'EscalationPolicy') - OnCallScheduleICal = apps.get_model('schedules', 'OnCallScheduleICal') - AlertGroup = apps.get_model('alerts', 'AlertGroup') - Alert = apps.get_model('alerts', 'Alert') - CustomButton = apps.get_model("alerts", "CustomButton") - CustomOnCallShift = apps.get_model('schedules', 'CustomOnCallShift') - - organization = Organization.objects.get(public_primary_key=public_api_constants.DEMO_ORGANIZATION_ID) - user = User.objects.get(public_primary_key=public_api_constants.DEMO_USER_ID) - - alert_receive_channel, _ = AlertReceiveChannel.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_INTEGRATION_ID, - defaults=dict( - integration=0, - author=user, - organization=organization, - smile_code=number_to_smiles_translator(0) - ) - ) - escalation_chain, _ = EscalationChain.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_ESCALATION_CHAIN_ID, - defaults=dict( - name="default", - organization=organization, - ) - ) - - channel_filter_1, _ = ChannelFilter.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_ROUTE_ID_1, - defaults=dict( - 
alert_receive_channel=alert_receive_channel, - slack_channel_id=public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID, - filtering_term='us-(east|west)', - order=0, - escalation_chain=escalation_chain, - ) - ) - ChannelFilter.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_ROUTE_ID_2, - defaults=dict( - alert_receive_channel=alert_receive_channel, - slack_channel_id=public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID, - filtering_term='.*', - order=1, - is_default=True, - escalation_chain=escalation_chain, - ) - ) - - EscalationPolicy.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_ESCALATION_POLICY_ID_1, - defaults=dict( - step=STEP_WAIT, - wait_delay=timezone.timedelta(minutes=1), - order=0, - escalation_chain=escalation_chain, - ) - ) - - escalation_policy_1, _ = EscalationPolicy.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_ESCALATION_POLICY_ID_2, - defaults=dict( - step=STEP_NOTIFY_USERS_QUEUE, - order=1, - escalation_chain=escalation_chain, - ) - ) - escalation_policy_1.notify_to_users_queue.add(user) - - schedule, _ = OnCallScheduleICal.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_ICAL, - defaults=dict( - organization=organization, - name=public_api_constants.DEMO_SCHEDULE_NAME_ICAL, - ical_url_overrides=public_api_constants.DEMO_SCHEDULE_ICAL_URL_OVERRIDES, - channel=public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID, - ) - ) - - alert_group, _ = AlertGroup.all_objects.get_or_create( - public_primary_key=public_api_constants.DEMO_INCIDENT_ID, - defaults=dict( - channel=alert_receive_channel, - channel_filter=channel_filter_1, - resolved=True, - resolved_at=dateparse.parse_datetime(public_api_constants.DEMO_INCIDENT_RESOLVED_AT), - ) - ) - alert_group.started_at = dateparse.parse_datetime(public_api_constants.DEMO_INCIDENT_CREATED_AT) - alert_group.save(update_fields=['started_at']) - - for id, created_at in public_api_constants.DEMO_ALERT_IDS: - alert, _ = 
Alert.objects.get_or_create( - public_primary_key=id, - defaults=dict( - group=alert_group, - raw_request_data=public_api_constants.DEMO_ALERT_PAYLOAD, - title='Memory above 90% threshold', - ) - ) - alert.created_at = dateparse.parse_datetime(created_at) - alert.save(update_fields=['created_at']) - - CustomButton.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_CUSTOM_ACTION_ID, - defaults=dict( - name=public_api_constants.DEMO_CUSTOM_ACTION_NAME, - organization=organization, - ) - ) - - on_call_shift_1, _ = CustomOnCallShift.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_ON_CALL_SHIFT_ID_1, - defaults=dict( - type=TYPE_SINGLE_EVENT, - organization=organization, - name=public_api_constants.DEMO_ON_CALL_SHIFT_NAME_1, - start=dateparse.parse_datetime(public_api_constants.DEMO_ON_CALL_SHIFT_START_1), - duration=timezone.timedelta(seconds=public_api_constants.DEMO_ON_CALL_SHIFT_DURATION), - ) - ) - - on_call_shift_1.users.add(user) - - on_call_shift_2, _ = CustomOnCallShift.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_ON_CALL_SHIFT_ID_2, - defaults=dict( - type=TYPE_RECURRENT_EVENT, - organization=organization, - name=public_api_constants.DEMO_ON_CALL_SHIFT_NAME_2, - start=dateparse.parse_datetime(public_api_constants.DEMO_ON_CALL_SHIFT_START_2), - duration=timezone.timedelta(seconds=public_api_constants.DEMO_ON_CALL_SHIFT_DURATION), - frequency=FREQUENCY_WEEKLY, - interval=2, - by_day=public_api_constants.DEMO_ON_CALL_SHIFT_BY_DAY, - source=SOURCE_TERRAFORM, - ) - ) - - on_call_shift_2.users.add(user) - - -class Migration(migrations.Migration): - - dependencies = [ - ('alerts', '0002_squashed_initial'), - ('user_management', '0002_squashed_create_demo_token_instances'), - ('schedules', '0002_squashed_initial'), - ] - - operations = [ - migrations.RunPython(create_demo_token_instances, migrations.RunPython.noop) - ] diff --git a/engine/apps/alerts/models/channel_filter.py 
b/engine/apps/alerts/models/channel_filter.py index b1f1dae2..fb369088 100644 --- a/engine/apps/alerts/models/channel_filter.py +++ b/engine/apps/alerts/models/channel_filter.py @@ -113,20 +113,7 @@ class ChannelFilter(OrderedModel): return satisfied_filter def is_satisfying(self, raw_request_data, title, message=None): - AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel") - - return ( - self.is_default - or self.check_filter(json.dumps(raw_request_data)) - or self.check_filter(str(title)) - or - # Special case for Amazon SNS - ( - self.check_filter(str(message)) - if self.alert_receive_channel.integration == AlertReceiveChannel.INTEGRATION_AMAZON_SNS - else False - ) - ) + return self.is_default or self.check_filter(json.dumps(raw_request_data)) or self.check_filter(str(title)) def check_filter(self, value): return re.search(self.filtering_term, value) diff --git a/engine/apps/alerts/tasks/__init__.py b/engine/apps/alerts/tasks/__init__.py index 8e0e994f..3ff8501e 100644 --- a/engine/apps/alerts/tasks/__init__.py +++ b/engine/apps/alerts/tasks/__init__.py @@ -4,6 +4,7 @@ from .calculcate_escalation_finish_time import calculate_escalation_finish_time from .call_ack_url import call_ack_url # noqa: F401 from .check_escalation_finished import check_escalation_finished_task # noqa: F401 from .create_contact_points_for_datasource import create_contact_points_for_datasource # noqa: F401 +from .create_contact_points_for_datasource import schedule_create_contact_points_for_datasource # noqa: F401 from .custom_button_result import custom_button_result # noqa: F401 from .delete_alert_group import delete_alert_group # noqa: F401 from .distribute_alert import distribute_alert # noqa: F401 diff --git a/engine/apps/alerts/tasks/create_contact_points_for_datasource.py b/engine/apps/alerts/tasks/create_contact_points_for_datasource.py index f3dc3f4b..a447a39c 100644 --- a/engine/apps/alerts/tasks/create_contact_points_for_datasource.py +++ 
b/engine/apps/alerts/tasks/create_contact_points_for_datasource.py @@ -1,9 +1,32 @@ +import logging + +from celery.utils.log import get_task_logger from django.apps import apps +from django.core.cache import cache from rest_framework import status from apps.grafana_plugin.helpers import GrafanaAPIClient from common.custom_celery_tasks import shared_dedicated_queue_retry_task +logger = get_task_logger(__name__) +logger.setLevel(logging.DEBUG) + + +def get_cache_key_create_contact_points_for_datasource(alert_receive_channel_id): + CACHE_KEY_PREFIX = "create_contact_points_for_datasource" + return f"{CACHE_KEY_PREFIX}_{alert_receive_channel_id}" + + +@shared_dedicated_queue_retry_task +def schedule_create_contact_points_for_datasource(alert_receive_channel_id, datasource_list): + CACHE_LIFETIME = 600 + START_TASK_DELAY = 3 + task = create_contact_points_for_datasource.apply_async( + args=[alert_receive_channel_id, datasource_list], countdown=START_TASK_DELAY + ) + cache_key = get_cache_key_create_contact_points_for_datasource(alert_receive_channel_id) + cache.set(cache_key, task.id, timeout=CACHE_LIFETIME) + @shared_dedicated_queue_retry_task(autoretry_for=(Exception,), retry_backoff=True, max_retries=10) def create_contact_points_for_datasource(alert_receive_channel_id, datasource_list): @@ -11,6 +34,11 @@ def create_contact_points_for_datasource(alert_receive_channel_id, datasource_li Try to create contact points for other datasource. Restart task for datasource, for which contact point was not created. 
""" + cache_key = get_cache_key_create_contact_points_for_datasource(alert_receive_channel_id) + cached_task_id = cache.get(cache_key) + current_task_id = create_contact_points_for_datasource.request.id + if cached_task_id is not None and current_task_id != cached_task_id: + return AlertReceiveChannel = apps.get_model("alerts", "AlertReceiveChannel") @@ -21,7 +49,7 @@ def create_contact_points_for_datasource(alert_receive_channel_id, datasource_li api_token=alert_receive_channel.organization.api_token, ) # list of datasource for which contact point creation was failed - datasource_to_create = [] + datasources_to_create = [] for datasource in datasource_list: contact_point = None config, response_info = client.get_alerting_config(datasource["id"]) @@ -29,16 +57,22 @@ def create_contact_points_for_datasource(alert_receive_channel_id, datasource_li if response_info.get("status_code") == status.HTTP_404_NOT_FOUND: client.get_alertmanager_status_with_config(datasource["id"]) contact_point = alert_receive_channel.grafana_alerting_sync_manager.create_contact_point(datasource) + elif response_info.get("status_code") == status.HTTP_400_BAD_REQUEST: + logger.warning( + f"Failed to create contact point for integration {alert_receive_channel_id}, " + f"datasource info: {datasource}; response: {response_info}" + ) + continue else: contact_point = alert_receive_channel.grafana_alerting_sync_manager.create_contact_point(datasource) if contact_point is None: # Failed to create contact point duo to getting wrong alerting config. 
# Add datasource to list and retry to create contact point for it again - datasource_to_create.append(datasource) + datasources_to_create.append(datasource) # if some contact points were not created, restart task for them - if datasource_to_create: - create_contact_points_for_datasource.apply_async((alert_receive_channel_id, datasource_to_create), countdown=5) + if datasources_to_create: + schedule_create_contact_points_for_datasource(alert_receive_channel_id, datasources_to_create) else: alert_receive_channel.is_finished_alerting_setup = True alert_receive_channel.save(update_fields=["is_finished_alerting_setup"]) diff --git a/engine/apps/alerts/tasks/notify_user.py b/engine/apps/alerts/tasks/notify_user.py index 05a9456f..b3a998b4 100644 --- a/engine/apps/alerts/tasks/notify_user.py +++ b/engine/apps/alerts/tasks/notify_user.py @@ -12,6 +12,7 @@ from apps.alerts.constants import NEXT_ESCALATION_DELAY from apps.alerts.incident_appearance.renderers.web_renderer import AlertGroupWebRenderer from apps.alerts.signals import user_notification_action_triggered_signal from apps.base.messaging import get_messaging_backend_from_id +from apps.base.utils import live_settings from common.custom_celery_tasks import shared_dedicated_queue_retry_task from .task_logger import task_logger @@ -56,6 +57,13 @@ def notify_user_task( if not user.is_notification_allowed: task_logger.info(f"notify_user_task: user {user.pk} notification is not allowed for role {user.role}") + UserNotificationPolicyLogRecord( + author=user, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, + reason=f"notification is not allowed for user with role {user.role}", + alert_group=alert_group, + notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ALLOWED_USER_ROLE, + ).save() return user_has_notification, _ = UserHasNotification.objects.get_or_create( @@ -257,11 +265,31 @@ def perform_notification(log_record_pk): ).save() return + if not 
user.is_notification_allowed: + UserNotificationPolicyLogRecord( + author=user, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, + reason=f"notification is not allowed for user with role {user.role}", + alert_group=alert_group, + notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ALLOWED_USER_ROLE, + ).save() + return + if notification_channel == UserNotificationPolicy.NotificationChannel.SMS: - SMSMessage.send_sms(user, alert_group, notification_policy) + SMSMessage.send_sms( + user, + alert_group, + notification_policy, + is_cloud_notification=live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED, + ) elif notification_channel == UserNotificationPolicy.NotificationChannel.PHONE_CALL: - PhoneCall.make_call(user, alert_group, notification_policy) + PhoneCall.make_call( + user, + alert_group, + notification_policy, + is_cloud_notification=live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED, + ) elif notification_channel == UserNotificationPolicy.NotificationChannel.TELEGRAM: if alert_group.notify_in_telegram_enabled is True: diff --git a/engine/apps/alerts/tests/test_alert_group_renderer.py b/engine/apps/alerts/tests/test_alert_group_renderer.py index 5253832e..aa7df113 100644 --- a/engine/apps/alerts/tests/test_alert_group_renderer.py +++ b/engine/apps/alerts/tests/test_alert_group_renderer.py @@ -2,7 +2,7 @@ import pytest from apps.alerts.incident_appearance.templaters import AlertSlackTemplater from apps.alerts.models import AlertGroup -from apps.integrations.metadata.configuration import grafana +from config_integrations import grafana @pytest.mark.django_db diff --git a/engine/apps/alerts/tests/test_default_templates.py b/engine/apps/alerts/tests/test_default_templates.py index 69288fb6..63cfd0b8 100644 --- a/engine/apps/alerts/tests/test_default_templates.py +++ b/engine/apps/alerts/tests/test_default_templates.py @@ -10,9 +10,9 @@ from apps.alerts.incident_appearance.templaters import ( AlertWebTemplater, ) from 
apps.alerts.models import Alert, AlertReceiveChannel -from apps.integrations.metadata.configuration import grafana from common.jinja_templater import jinja_template_env from common.utils import getattrd +from config_integrations import grafana @pytest.mark.django_db diff --git a/engine/apps/alerts/tests/test_escalation_policy_snapshot.py b/engine/apps/alerts/tests/test_escalation_policy_snapshot.py index a3d27f45..9a555c35 100644 --- a/engine/apps/alerts/tests/test_escalation_policy_snapshot.py +++ b/engine/apps/alerts/tests/test_escalation_policy_snapshot.py @@ -10,6 +10,7 @@ from apps.alerts.escalation_snapshot.utils import eta_for_escalation_step_notify from apps.alerts.models import AlertGroupLogRecord, EscalationPolicy from apps.schedules.ical_utils import list_users_to_notify_from_ical from apps.schedules.models import CustomOnCallShift, OnCallScheduleCalendar +from common.constants.role import Role def get_escalation_policy_snapshot_from_model(escalation_policy): @@ -200,6 +201,55 @@ def test_escalation_step_notify_on_call_schedule( assert mocked_execute_tasks.called +@patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None) +@pytest.mark.django_db +def test_escalation_step_notify_on_call_schedule_viewer_user( + mocked_execute_tasks, + escalation_step_test_setup, + make_user_for_organization, + make_escalation_policy, + make_schedule, + make_on_call_shift, +): + organization, user, _, channel_filter, alert_group, reason = escalation_step_test_setup + viewer = make_user_for_organization(organization=organization, role=Role.VIEWER) + + schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar) + # create on_call_shift with user to notify + data = { + "start": timezone.datetime.now().replace(microsecond=0), + "duration": timezone.timedelta(seconds=7200), + } + on_call_shift = make_on_call_shift( + organization=organization, shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT, **data + ) + 
on_call_shift.users.add(viewer) + schedule.custom_on_call_shifts.add(on_call_shift) + + notify_schedule_step = make_escalation_policy( + escalation_chain=channel_filter.escalation_chain, + escalation_policy_step=EscalationPolicy.STEP_NOTIFY_SCHEDULE, + notify_schedule=schedule, + ) + escalation_policy_snapshot = get_escalation_policy_snapshot_from_model(notify_schedule_step) + expected_eta = timezone.now() + timezone.timedelta(seconds=NEXT_ESCALATION_DELAY) + result = escalation_policy_snapshot.execute(alert_group, reason) + expected_result = EscalationPolicySnapshot.StepExecutionResultData( + eta=result.eta, + stop_escalation=False, + pause_escalation=False, + start_from_beginning=False, + ) + assert expected_eta + timezone.timedelta(seconds=15) > result.eta > expected_eta - timezone.timedelta(seconds=15) + assert result == expected_result + assert notify_schedule_step.log_records.filter(type=AlertGroupLogRecord.TYPE_ESCALATION_TRIGGERED).exists() + assert list(escalation_policy_snapshot.notify_to_users_queue) == list( + list_users_to_notify_from_ical(schedule, include_viewers=True) + ) + assert list(escalation_policy_snapshot.notify_to_users_queue) == [viewer] + assert mocked_execute_tasks.called + + @patch("apps.alerts.escalation_snapshot.snapshot_classes.EscalationPolicySnapshot._execute_tasks", return_value=None) @pytest.mark.django_db def test_escalation_step_notify_user_group( diff --git a/engine/apps/alerts/tests/test_notify_user.py b/engine/apps/alerts/tests/test_notify_user.py index 06677544..0f43305b 100644 --- a/engine/apps/alerts/tests/test_notify_user.py +++ b/engine/apps/alerts/tests/test_notify_user.py @@ -2,9 +2,10 @@ from unittest.mock import patch import pytest -from apps.alerts.tasks.notify_user import perform_notification +from apps.alerts.tasks.notify_user import notify_user_task, perform_notification from apps.base.models.user_notification_policy import UserNotificationPolicy from apps.base.models.user_notification_policy_log_record import 
UserNotificationPolicyLogRecord +from common.constants.role import Role @pytest.mark.django_db @@ -118,3 +119,62 @@ def test_notify_user_missing_data_errors( assert error_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED assert error_log_record.reason == "Expected data is missing" assert error_log_record.notification_error_code is None + + +@pytest.mark.django_db +def test_notify_user_perform_notification_error_if_viewer( + make_organization, + make_user, + make_user_notification_policy, + make_alert_receive_channel, + make_alert_group, + make_user_notification_policy_log_record, +): + organization = make_organization() + user_1 = make_user(organization=organization, role=Role.VIEWER, _verified_phone_number="1234567890") + user_notification_policy = make_user_notification_policy( + user=user_1, + step=UserNotificationPolicy.Step.NOTIFY, + notify_by=UserNotificationPolicy.NotificationChannel.SMS, + ) + alert_receive_channel = make_alert_receive_channel(organization=organization) + alert_group = make_alert_group(alert_receive_channel=alert_receive_channel) + log_record = make_user_notification_policy_log_record( + author=user_1, + alert_group=alert_group, + notification_policy=user_notification_policy, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_TRIGGERED, + ) + + perform_notification(log_record.pk) + + error_log_record = UserNotificationPolicyLogRecord.objects.last() + assert error_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED + assert error_log_record.reason == f"notification is not allowed for user with role {user_1.role}" + assert ( + error_log_record.notification_error_code + == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ALLOWED_USER_ROLE + ) + + +@pytest.mark.django_db +def test_notify_user_error_if_viewer( + make_organization, + make_user, + make_alert_receive_channel, + make_alert_group, +): + organization = make_organization() + user_1 = 
make_user(organization=organization, role=Role.VIEWER, _verified_phone_number="1234567890") + alert_receive_channel = make_alert_receive_channel(organization=organization) + alert_group = make_alert_group(alert_receive_channel=alert_receive_channel) + + notify_user_task(user_1.pk, alert_group.pk) + + error_log_record = UserNotificationPolicyLogRecord.objects.last() + assert error_log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED + assert error_log_record.reason == f"notification is not allowed for user with role {user_1.role}" + assert ( + error_log_record.notification_error_code + == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ALLOWED_USER_ROLE + ) diff --git a/engine/apps/api/serializers/user.py b/engine/apps/api/serializers/user.py index e9ec91b2..db0db0ed 100644 --- a/engine/apps/api/serializers/user.py +++ b/engine/apps/api/serializers/user.py @@ -1,9 +1,12 @@ +from django.conf import settings from rest_framework import serializers from apps.api.serializers.telegram import TelegramToUserConnectorSerializer from apps.base.constants import ADMIN_PERMISSIONS, ALL_ROLES_PERMISSIONS, EDITOR_PERMISSIONS from apps.base.messaging import get_messaging_backends from apps.base.models import UserNotificationPolicy +from apps.base.utils import live_settings +from apps.oss_installation.utils import cloud_user_identity_status from apps.twilioapp.utils import check_phone_number_is_valid from apps.user_management.models import User from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField @@ -30,6 +33,7 @@ class UserSerializer(DynamicFieldsModelSerializer, EagerLoadingMixin): permissions = serializers.SerializerMethodField() notification_chain_verbal = serializers.SerializerMethodField() + cloud_connection_status = serializers.SerializerMethodField() SELECT_RELATED = ["telegram_verification_code", "telegram_connection", "organization", "slack_user_identity"] @@ -50,6 +54,7 @@ class 
UserSerializer(DynamicFieldsModelSerializer, EagerLoadingMixin): "messaging_backends", "permissions", "notification_chain_verbal", + "cloud_connection_status", ] read_only_fields = [ "email", @@ -88,6 +93,15 @@ class UserSerializer(DynamicFieldsModelSerializer, EagerLoadingMixin): default, important = UserNotificationPolicy.get_short_verbals_for_user(user=obj) return {"default": " - ".join(default), "important": " - ".join(important)} + def get_cloud_connection_status(self, obj): + if settings.OSS_INSTALLATION and live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED: + connector = self.context.get("connector", None) + identities = self.context.get("cloud_identities", {}) + identity = identities.get(obj.email, None) + status, _ = cloud_user_identity_status(connector, identity) + return status + return None + class UserHiddenFieldsSerializer(UserSerializer): available_for_all_roles_fields = [ diff --git a/engine/apps/api/tests/test_features.py b/engine/apps/api/tests/test_features.py index e391b8fb..30b37944 100644 --- a/engine/apps/api/tests/test_features.py +++ b/engine/apps/api/tests/test_features.py @@ -3,7 +3,13 @@ from django.urls import reverse from rest_framework import status from rest_framework.test import APIClient -from apps.api.views.features import FEATURE_LIVE_SETTINGS, FEATURE_SLACK, FEATURE_TELEGRAM +from apps.api.views.features import ( + FEATURE_GRAFANA_CLOUD_CONNECTION, + FEATURE_GRAFANA_CLOUD_NOTIFICATIONS, + FEATURE_LIVE_SETTINGS, + FEATURE_SLACK, + FEATURE_TELEGRAM, +) @pytest.mark.django_db @@ -30,15 +36,24 @@ def test_select_features_all_enabled( make_user_auth_headers, ): organization, user, token = make_organization_and_user_with_plugin_token() + settings.OSS_INSTALLATION = True settings.FEATURE_SLACK_INTEGRATION_ENABLED = True settings.FEATURE_TELEGRAM_INTEGRATION_ENABLED = True settings.FEATURE_LIVE_SETTINGS_ENABLED = True + settings.FEATURE_GRAFANA_CLOUD_CONNECTION = True + settings.FEATURE_GRAFANA_CLOUD_NOTIFICATIONS = True client = 
APIClient() url = reverse("api-internal:features") response = client.get(url, format="json", **make_user_auth_headers(user, token)) assert response.status_code == status.HTTP_200_OK - assert response.json() == [FEATURE_SLACK, FEATURE_TELEGRAM, FEATURE_LIVE_SETTINGS] + assert response.json() == [ + FEATURE_SLACK, + FEATURE_TELEGRAM, + FEATURE_GRAFANA_CLOUD_CONNECTION, + FEATURE_LIVE_SETTINGS, + FEATURE_GRAFANA_CLOUD_NOTIFICATIONS, + ] @pytest.mark.django_db @@ -48,9 +63,12 @@ def test_select_features_all_disabled( make_user_auth_headers, ): organization, user, token = make_organization_and_user_with_plugin_token() + settings.OSS_INSTALLATION = False settings.FEATURE_SLACK_INTEGRATION_ENABLED = False settings.FEATURE_TELEGRAM_INTEGRATION_ENABLED = False settings.FEATURE_LIVE_SETTINGS_ENABLED = False + settings.FEATURE_GRAFANA_CLOUD_CONNECTION = False + settings.FEATURE_GRAFANA_CLOUD_NOTIFICATIONS = FEATURE_GRAFANA_CLOUD_NOTIFICATIONS client = APIClient() url = reverse("api-internal:features") response = client.get(url, format="json", **make_user_auth_headers(user, token)) diff --git a/engine/apps/api/tests/test_user.py b/engine/apps/api/tests/test_user.py index 5731ed17..dd23feb5 100644 --- a/engine/apps/api/tests/test_user.py +++ b/engine/apps/api/tests/test_user.py @@ -75,6 +75,7 @@ def test_update_user_cant_change_email_and_username( "user": admin.username, } }, + "cloud_connection_status": 0, "permissions": ADMIN_PERMISSIONS, "notification_chain_verbal": {"default": "", "important": ""}, "slack_user_identity": None, @@ -124,6 +125,7 @@ def test_list_users( "notification_chain_verbal": {"default": "", "important": ""}, "slack_user_identity": None, "avatar": admin.avatar_url, + "cloud_connection_status": 0, }, { "pk": editor.public_primary_key, @@ -144,6 +146,7 @@ def test_list_users( "notification_chain_verbal": {"default": "", "important": ""}, "slack_user_identity": None, "avatar": editor.avatar_url, + "cloud_connection_status": 0, }, ], } diff --git 
a/engine/apps/api/views/features.py b/engine/apps/api/views/features.py index 6a4285de..805308a9 100644 --- a/engine/apps/api/views/features.py +++ b/engine/apps/api/views/features.py @@ -4,11 +4,14 @@ from rest_framework.response import Response from rest_framework.views import APIView from apps.auth_token.auth import PluginAuthentication +from apps.base.utils import live_settings FEATURE_SLACK = "slack" FEATURE_TELEGRAM = "telegram" FEATURE_LIVE_SETTINGS = "live_settings" MOBILE_APP_PUSH_NOTIFICATIONS = "mobile_app" +FEATURE_GRAFANA_CLOUD_NOTIFICATIONS = "grafana_cloud_notifications" +FEATURE_GRAFANA_CLOUD_CONNECTION = "grafana_cloud_connection" class FeaturesAPIView(APIView): @@ -31,9 +34,6 @@ class FeaturesAPIView(APIView): if settings.FEATURE_TELEGRAM_INTEGRATION_ENABLED: enabled_features.append(FEATURE_TELEGRAM) - if settings.FEATURE_LIVE_SETTINGS_ENABLED: - enabled_features.append(FEATURE_LIVE_SETTINGS) - if settings.MOBILE_APP_PUSH_NOTIFICATIONS_ENABLED: DynamicSetting = apps.get_model("base", "DynamicSetting") mobile_app_settings = DynamicSetting.objects.get_or_create( @@ -48,4 +48,12 @@ class FeaturesAPIView(APIView): if request.auth.organization.pk in mobile_app_settings.json_value["org_ids"]: enabled_features.append(MOBILE_APP_PUSH_NOTIFICATIONS) + if settings.OSS_INSTALLATION: + # Features below should be enabled only in OSS + enabled_features.append(FEATURE_GRAFANA_CLOUD_CONNECTION) + if settings.FEATURE_LIVE_SETTINGS_ENABLED: + enabled_features.append(FEATURE_LIVE_SETTINGS) + if live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED: + enabled_features.append(FEATURE_GRAFANA_CLOUD_NOTIFICATIONS) + return enabled_features diff --git a/engine/apps/api/views/live_setting.py b/engine/apps/api/views/live_setting.py index 2ed6d723..1718bd15 100644 --- a/engine/apps/api/views/live_setting.py +++ b/engine/apps/api/views/live_setting.py @@ -12,6 +12,7 @@ from apps.api.serializers.live_setting import LiveSettingSerializer from apps.auth_token.auth import 
PluginAuthentication from apps.base.models import LiveSetting from apps.base.utils import live_settings +from apps.oss_installation.tasks import sync_users_with_cloud from apps.slack.tasks import unpopulate_slack_user_identities from apps.telegram.client import TelegramClient from apps.telegram.tasks import register_telegram_webhook @@ -32,13 +33,19 @@ class LiveSettingViewSet(PublicPrimaryKeyMixin, viewsets.ModelViewSet): def get_queryset(self): LiveSetting.populate_settings_if_needed() - return LiveSetting.objects.filter(name__in=LiveSetting.AVAILABLE_NAMES).order_by("name") + queryset = LiveSetting.objects.filter(name__in=LiveSetting.AVAILABLE_NAMES).order_by("name") + search = self.request.query_params.get("search", None) + if search: + queryset = queryset.filter(name=search) + return queryset def perform_update(self, serializer): new_value = serializer.validated_data["value"] self._update_hook(new_value) - - super().perform_update(serializer) + instance = serializer.save() + sync_users = self.request.query_params.get("sync_users", "true") == "true" + if instance.name == "GRAFANA_CLOUD_ONCALL_TOKEN" and sync_users: + sync_users_with_cloud.apply_async() def perform_destroy(self, instance): new_value = instance.default_value @@ -66,6 +73,17 @@ class LiveSettingViewSet(PublicPrimaryKeyMixin, viewsets.ModelViewSet): if sti is not None: unpopulate_slack_user_identities.apply_async((sti.pk, True), countdown=0) + if instance.name == "GRAFANA_CLOUD_ONCALL_TOKEN": + from apps.oss_installation.models import CloudConnector + + try: + old_token = live_settings.GRAFANA_CLOUD_ONCALL_TOKEN + except ImproperlyConfigured: + old_token = None + + if old_token != new_value: + CloudConnector.remove_sync() + def _reset_telegram_integration(self, new_token): # tell Telegram to cancel sending events from old bot with suppress(ImproperlyConfigured, error.InvalidToken, error.Unauthorized): diff --git a/engine/apps/api/views/user.py b/engine/apps/api/views/user.py index 
ee0a75de..e7d20a32 100644 --- a/engine/apps/api/views/user.py +++ b/engine/apps/api/views/user.py @@ -34,6 +34,7 @@ from apps.auth_token.models import UserScheduleExportAuthToken from apps.auth_token.models.mobile_app_auth_token import MobileAppAuthToken from apps.auth_token.models.mobile_app_verification_token import MobileAppVerificationToken from apps.base.messaging import get_messaging_backend_from_id +from apps.base.utils import live_settings from apps.telegram.client import TelegramClient from apps.telegram.models import TelegramVerificationCode from apps.twilioapp.phone_manager import PhoneManager @@ -56,7 +57,19 @@ class CurrentUserView(APIView): permission_classes = (IsAuthenticated,) def get(self, request): - serializer = UserSerializer(request.user, context={"request": self.request}) + context = {"request": self.request, "format": self.format_kwarg, "view": self} + + if settings.OSS_INSTALLATION and live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED: + from apps.oss_installation.models import CloudConnector, CloudUserIdentity + + connector = CloudConnector.objects.first() + if connector is not None: + cloud_identities = list(CloudUserIdentity.objects.filter(email__in=[request.user.email])) + cloud_identities = {cloud_identity.email: cloud_identity for cloud_identity in cloud_identities} + context["cloud_identities"] = cloud_identities + context["connector"] = connector + + serializer = UserSerializer(request.user, context=context) return Response(serializer.data) def put(self, request): @@ -179,6 +192,46 @@ class UserView( return queryset.order_by("id") + def list(self, request, *args, **kwargs): + queryset = self.filter_queryset(self.get_queryset()) + + page = self.paginate_queryset(queryset) + if page is not None: + context = {"request": self.request, "format": self.format_kwarg, "view": self} + if settings.OSS_INSTALLATION: + if live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED: + from apps.oss_installation.models import CloudConnector, 
CloudUserIdentity + + connector = CloudConnector.objects.first() + if connector is not None: + emails = list(queryset.values_list("email", flat=True)) + cloud_identities = list(CloudUserIdentity.objects.filter(email__in=emails)) + cloud_identities = {cloud_identity.email: cloud_identity for cloud_identity in cloud_identities} + context["cloud_identities"] = cloud_identities + context["connector"] = connector + serializer = self.get_serializer(page, many=True, context=context) + return self.get_paginated_response(serializer.data) + + serializer = self.get_serializer(queryset, many=True) + return Response(serializer.data) + + def retrieve(self, request, *args, **kwargs): + context = {"request": self.request, "format": self.format_kwarg, "view": self} + instance = self.get_object() + + if settings.OSS_INSTALLATION and live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED: + from apps.oss_installation.models import CloudConnector, CloudUserIdentity + + connector = CloudConnector.objects.first() + if connector is not None: + cloud_identities = list(CloudUserIdentity.objects.filter(email__in=[instance.email])) + cloud_identities = {cloud_identity.email: cloud_identity for cloud_identity in cloud_identities} + context["cloud_identities"] = cloud_identities + context["connector"] = connector + + serializer = self.get_serializer(instance, context=context) + return Response(serializer.data) + def current(self, request): serializer = UserSerializer(self.get_queryset().get(pk=self.request.user.pk)) return Response(serializer.data) diff --git a/engine/apps/auth_token/auth.py b/engine/apps/auth_token/auth.py index aa1a6251..be4a99f3 100644 --- a/engine/apps/auth_token/auth.py +++ b/engine/apps/auth_token/auth.py @@ -9,7 +9,6 @@ from rest_framework.authentication import BaseAuthentication, get_authorization_ from rest_framework.request import Request from apps.grafana_plugin.helpers.gcom import check_token -from apps.public_api import constants as public_api_constants from 
apps.user_management.models import User from apps.user_management.models.organization import Organization from common.constants.role import Role @@ -29,12 +28,6 @@ class ApiTokenAuthentication(BaseAuthentication): def authenticate(self, request): auth = get_authorization_header(request).decode("utf-8") - - if auth == public_api_constants.DEMO_AUTH_TOKEN: - user = User.objects.get(public_primary_key=public_api_constants.DEMO_USER_ID) - auth_token = user.auth_tokens.first() - return user, auth_token - user, auth_token = self.authenticate_credentials(auth) if user.role != Role.ADMIN: diff --git a/engine/apps/auth_token/migrations/0003_squashed_create_demo_token_instances.py b/engine/apps/auth_token/migrations/0003_squashed_create_demo_token_instances.py deleted file mode 100644 index 225e0fcb..00000000 --- a/engine/apps/auth_token/migrations/0003_squashed_create_demo_token_instances.py +++ /dev/null @@ -1,40 +0,0 @@ -# Generated by Django 3.2.5 on 2021-08-04 13:02 - -import sys -from django.db import migrations - -from apps.auth_token import constants -from apps.auth_token import crypto -from apps.public_api import constants as public_api_constants - - -def create_demo_token_instances(apps, schema_editor): - if not (len(sys.argv) > 1 and sys.argv[1] == 'test'): - User = apps.get_model('user_management', 'User') - Organization = apps.get_model('user_management', 'Organization') - ApiAuthToken = apps.get_model('auth_token', 'ApiAuthToken') - - organization = Organization.objects.get(public_primary_key=public_api_constants.DEMO_ORGANIZATION_ID) - user = User.objects.get(public_primary_key=public_api_constants.DEMO_USER_ID) - - token_string = crypto.generate_token_string() - digest = crypto.hash_token_string(token_string) - - ApiAuthToken.objects.get_or_create( - name=public_api_constants.DEMO_AUTH_TOKEN, - user=user, - organization=organization, - defaults=dict(token_key=token_string[:constants.TOKEN_KEY_LENGTH], digest=digest) - ) - - -class 
Migration(migrations.Migration): - - dependencies = [ - ('auth_token', '0002_squashed_initial'), - ('user_management', '0002_squashed_create_demo_token_instances') - ] - - operations = [ - migrations.RunPython(create_demo_token_instances, migrations.RunPython.noop) - ] diff --git a/engine/apps/base/migrations/0003_squashed_create_demo_token_instances.py b/engine/apps/base/migrations/0003_squashed_create_demo_token_instances.py deleted file mode 100644 index a590210a..00000000 --- a/engine/apps/base/migrations/0003_squashed_create_demo_token_instances.py +++ /dev/null @@ -1,74 +0,0 @@ -# Generated by Django 3.2.5 on 2021-08-04 10:45 - -import sys -from django.db import migrations -from django.utils import timezone -from apps.public_api import constants as public_api_constants - - -STEP_WAIT = 0 -STEP_NOTIFY = 1 -NOTIFY_BY_SMS = 1 -NOTIFY_BY_PHONE = 2 -FIVE_MINUTES = timezone.timedelta(minutes=5) - - -def create_demo_token_instances(apps, schema_editor): - if not (len(sys.argv) > 1 and sys.argv[1] == 'test'): - User = apps.get_model('user_management', 'User') - UserNotificationPolicy = apps.get_model("base", "UserNotificationPolicy") - - user = User.objects.get(public_primary_key=public_api_constants.DEMO_USER_ID) - - UserNotificationPolicy.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1, - defaults=dict( - important=False, - user=user, - notify_by=NOTIFY_BY_SMS, - step=STEP_NOTIFY, - order=0, - ) - ) - UserNotificationPolicy.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_2, - defaults=dict( - important=False, - user=user, - step=STEP_WAIT, - wait_delay=FIVE_MINUTES, - order=1, - ) - ) - UserNotificationPolicy.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_3, - defaults=dict( - important=False, - user=user, - step=STEP_NOTIFY, - notify_by=NOTIFY_BY_PHONE, - order=2, - ) - ) - - UserNotificationPolicy.objects.get_or_create( - 
public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_4, - defaults=dict( - important=True, - user=user, - notify_by=NOTIFY_BY_PHONE, - order=0, - ) - ) - - -class Migration(migrations.Migration): - - dependencies = [ - ('base', '0002_squashed_initial'), - ('user_management', '0002_squashed_create_demo_token_instances') - ] - - operations = [ - migrations.RunPython(create_demo_token_instances, migrations.RunPython.noop) - ] diff --git a/engine/apps/base/models/live_setting.py b/engine/apps/base/models/live_setting.py index c08ab11f..59126f3d 100644 --- a/engine/apps/base/models/live_setting.py +++ b/engine/apps/base/models/live_setting.py @@ -38,35 +38,45 @@ class LiveSetting(models.Model): "TWILIO_NUMBER", "TWILIO_VERIFY_SERVICE_SID", "TELEGRAM_TOKEN", + "TELEGRAM_WEBHOOK_HOST", "SLACK_CLIENT_OAUTH_ID", "SLACK_CLIENT_OAUTH_SECRET", "SLACK_SIGNING_SECRET", + "SLACK_INSTALL_RETURN_REDIRECT_HOST", "SEND_ANONYMOUS_USAGE_STATS", "GRAFANA_CLOUD_ONCALL_TOKEN", "GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED", + "GRAFANA_CLOUD_NOTIFICATIONS_ENABLED", ) DESCRIPTIONS = { "SLACK_SIGNING_SECRET": ( "Check this instruction for details how to set up Slack. " + "https://grafana.com/docs/grafana-cloud/oncall/open-source/#slack-setup" + "'>instruction for details how to set up Slack. " "Slack secrets can't be verified on the backend, please try installing the Slack Bot " - "after you update Slack credentials." + "after you update them." ), "SLACK_CLIENT_OAUTH_SECRET": ( "Check this instruction for details how to set up Slack. " + "https://grafana.com/docs/grafana-cloud/oncall/open-source/#slack-setup" + "'>instruction for details how to set up Slack. " "Slack secrets can't be verified on the backend, please try installing the Slack Bot " - "after you update Slack credentials." + "after you update them." ), "SLACK_CLIENT_OAUTH_ID": ( "Check this instruction for details how to set up Slack. 
" + "https://grafana.com/docs/grafana-cloud/oncall/open-source/#slack-setup" + "'>instruction for details how to set up Slack. " "Slack secrets can't be verified on the backend, please try installing the Slack Bot " - "after you update Slack credentials." + "after you update them." + ), + "SLACK_INSTALL_RETURN_REDIRECT_HOST": ( + "Check instruction for details how to set up Slack. " + "Slack secrets can't be verified on the backend, please try installing the Slack Bot " + "after you update them." ), "TWILIO_ACCOUNT_SID": ( "Twilio username to allow amixr send sms and make phone calls, " @@ -99,13 +109,17 @@ class LiveSetting(models.Model): "TELEGRAM_TOKEN": ( "Secret token for Telegram bot, you can get one via " "BotFather." ), + "TELEGRAM_WEBHOOK_HOST": ( + "Externally available URL for Telegram to make requests. Please restart OnCall backend after after update." + ), "SEND_ANONYMOUS_USAGE_STATS": ( "Grafana OnCall will send anonymous, but uniquely-identifiable usage analytics to Grafana Labs." " These statistics are sent to https://stats.grafana.org/. For more information on what's sent, look at" - "https://github.com/..." 
# TODO: add url to usage stats code + " https://github.com/grafana/oncall/blob/dev/engine/apps/oss_installation/usage_stats.py#L29" ), "GRAFANA_CLOUD_ONCALL_TOKEN": "Secret token for Grafana Cloud OnCall instance.", "GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED": "Enable hearbeat integration with Grafana Cloud OnCall.", + "GRAFANA_CLOUD_NOTIFICATIONS_ENABLED": "Enable SMS/call notifications via Grafana Cloud OnCall", } SECRET_SETTING_NAMES = ( @@ -171,4 +185,5 @@ class LiveSetting(models.Model): ) self.error = LiveSettingValidator(live_setting=self).get_error() + super().save(*args, **kwargs) diff --git a/engine/apps/base/models/user_notification_policy_log_record.py b/engine/apps/base/models/user_notification_policy_log_record.py index 93fd0820..15f86067 100644 --- a/engine/apps/base/models/user_notification_policy_log_record.py +++ b/engine/apps/base/models/user_notification_policy_log_record.py @@ -68,7 +68,8 @@ class UserNotificationPolicyLogRecord(models.Model): ERROR_NOTIFICATION_IN_SLACK_CHANNEL_IS_ARCHIVED, ERROR_NOTIFICATION_IN_SLACK_RATELIMIT, ERROR_NOTIFICATION_MESSAGING_BACKEND_ERROR, - ) = range(25) + ERROR_NOTIFICATION_NOT_ALLOWED_USER_ROLE, + ) = range(26) # for this errors we want to send message to general log channel ERRORS_TO_SEND_IN_SLACK_CHANNEL = [ @@ -266,6 +267,10 @@ class UserNotificationPolicyLogRecord(models.Model): result += f"failed to notify {user_verbal} in Slack, because channel is archived" elif self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_IN_SLACK_RATELIMIT: result += f"failed to notify {user_verbal} in Slack due to Slack rate limit" + elif ( + self.notification_error_code == UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ALLOWED_USER_ROLE + ): + result += f"failed to notify {user_verbal}, not allowed role" else: # TODO: handle specific backend errors try: diff --git a/engine/apps/base/utils.py b/engine/apps/base/utils.py index 7342d00e..8339e295 100644 --- a/engine/apps/base/utils.py +++ 
b/engine/apps/base/utils.py @@ -94,6 +94,13 @@ class LiveSettingValidator: except Exception as e: return f"Telegram error: {str(e)}" + @classmethod + def _check_grafana_cloud_oncall_token(cls, grafana_oncall_token): + from apps.oss_installation.models import CloudConnector + + _, err = CloudConnector.sync_with_cloud(grafana_oncall_token) + return err + @staticmethod def _is_email_valid(email): return re.match(r"^[^@]+@[^@]+\.[^@]+$", email) diff --git a/engine/apps/grafana_plugin/tasks/sync.py b/engine/apps/grafana_plugin/tasks/sync.py index 2d6c37bd..5ee38fe2 100644 --- a/engine/apps/grafana_plugin/tasks/sync.py +++ b/engine/apps/grafana_plugin/tasks/sync.py @@ -6,7 +6,6 @@ from django.utils import timezone from apps.grafana_plugin.helpers import GcomAPIClient from apps.grafana_plugin.helpers.gcom import get_active_instance_ids -from apps.public_api.constants import DEMO_ORGANIZATION_ID from apps.user_management.models import Organization from apps.user_management.sync import sync_organization from common.custom_celery_tasks import shared_dedicated_queue_retry_task @@ -23,9 +22,7 @@ SYNC_PERIOD = timezone.timedelta(minutes=25) def start_sync_organizations(): sync_threshold = timezone.now() - SYNC_PERIOD - organization_qs = Organization.objects.exclude(public_primary_key=DEMO_ORGANIZATION_ID).filter( - last_time_synced__lte=sync_threshold - ) + organization_qs = Organization.objects.filter(last_time_synced__lte=sync_threshold) active_instance_ids, is_cloud_configured = get_active_instance_ids() if is_cloud_configured: diff --git a/engine/apps/integrations/metadata/configuration/amazon_sns.py b/engine/apps/integrations/metadata/configuration/amazon_sns.py deleted file mode 100644 index 954542d0..00000000 --- a/engine/apps/integrations/metadata/configuration/amazon_sns.py +++ /dev/null @@ -1,99 +0,0 @@ -# Main -enabled = True -title = "Amazon SNS" -slug = "amazon_sns" -short_description = None -is_displayed_on_web = True -description = None -is_featured = False 
-is_able_to_autoresolve = True -is_demo_alert_enabled = True - -description = None - -# Default templates -slack_title = """\ -{% if payload|length == 0 -%} -{% set title = payload.get("AlarmName", "Alert") %} -{%- else -%} -{% set title = "Alert" %} -{%- endif %} - -*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ title }}>* via {{ integration_name }} -{% if source_link %} - (*<{{ source_link }}|source>*) -{%- endif %}""" - -slack_message = """\ -{% if payload|length == 1 and "message" in payload -%} -{{ payload.get("message", "Non-JSON payload received. Please make sure you publish monitoring Alarms to SNS, not logs: https://docs.amixr.io/#/integrations/amazon_sns") }} -{%- else -%} -*State* {{ payload.get("NewStateValue", "NO") }} -Region: {{ payload.get("Region", "Undefined") }} -_Description_: {{ payload.get("AlarmDescription", "Undefined") }} -{%- endif %} -""" - -slack_image_url = None - -web_title = """\ -{% if payload|length == 0 -%} -{{ payload.get("AlarmName", "Alert")}} -{%- else -%} -Alert -{%- endif %}""" - -web_message = """\ -{% if payload|length == 1 and "message" in payload -%} -{{ payload.get("message", "Non-JSON payload received. Please make sure you publish monitoring Alarms to SNS, not logs: https://docs.amixr.io/#/integrations/amazon_sns") }} -{%- else -%} -**State** {{ payload.get("NewStateValue", "NO") }} -Region: {{ payload.get("Region", "Undefined") }} -*Description*: {{ payload.get("AlarmDescription", "Undefined") }} -{%- endif %} -""" - -web_image_url = slack_image_url - -sms_title = web_title - -phone_call_title = web_title - -email_title = web_title - -email_message = "{{ payload|tojson_pretty }}" - -telegram_title = sms_title - -telegram_message = """\ -{% if payload|length == 1 and "message" in payload -%} -{{ payload.get("message", "Non-JSON payload received. 
Please make sure you publish monitoring Alarms to SNS, not logs: https://docs.amixr.io/#/integrations/amazon_sns") }} -{%- else -%} -State {{ payload.get("NewStateValue", "NO") }} -Region: {{ payload.get("Region", "Undefined") }} -Description: {{ payload.get("AlarmDescription", "Undefined") }} -{%- endif %} -""" - -telegram_image_url = slack_image_url - -source_link = """\ -{% if payload|length == 0 -%} -{% if payload.get("Trigger", {}).get("Namespace") == "AWS/ElasticBeanstalk" -%} -https://console.aws.amazon.com/elasticbeanstalk/home?region={{ payload.get("TopicArn").split(":")[3] }} -{%- else -%} -https://console.aws.amazon.com/cloudwatch//home?region={{ payload.get("TopicArn").split(":")[3] }} -{%- endif %} -{%- endif %}""" - -grouping_id = web_title - -resolve_condition = """\ -{{ payload.get("NewStateValue", "") == "OK" }} -""" - -acknowledge_condition = None - -group_verbose_name = web_title - -example_payload = {"foo": "bar"} diff --git a/engine/apps/oss_installation/cloud_heartbeat.py b/engine/apps/oss_installation/cloud_heartbeat.py new file mode 100644 index 00000000..8d445e83 --- /dev/null +++ b/engine/apps/oss_installation/cloud_heartbeat.py @@ -0,0 +1,110 @@ +import logging +import random +from urllib.parse import urljoin + +import requests +from django.apps import apps +from django.conf import settings +from rest_framework import status + +from apps.base.utils import live_settings + +logger = logging.getLogger(__name__) + + +def setup_heartbeat_integration(name=None): + """Setup Grafana Cloud OnCall heartbeat integration.""" + CloudHeartbeat = apps.get_model("oss_installation", "CloudHeartbeat") + + cloud_heartbeat = None + api_token = live_settings.GRAFANA_CLOUD_ONCALL_TOKEN + if not live_settings.GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED or not api_token: + return cloud_heartbeat + # don't specify a team in the data, so heartbeat integration will be created in the General. 
+ name = name or f"OnCall Cloud Heartbeat {settings.BASE_URL}" + data = {"type": "formatted_webhook", "name": name} + url = urljoin(settings.GRAFANA_CLOUD_ONCALL_API_URL, "/api/v1/integrations/") + try: + headers = {"Authorization": api_token} + r = requests.post(url=url, data=data, headers=headers, timeout=5) + if r.status_code == status.HTTP_201_CREATED: + response_data = r.json() + cloud_heartbeat, _ = CloudHeartbeat.objects.update_or_create( + defaults={"integration_id": response_data["id"], "integration_url": response_data["heartbeat"]["link"]} + ) + if r.status_code == status.HTTP_400_BAD_REQUEST: + response_data = r.json() + error = response_data["detail"] + if error == "Integration with this name already exists": + response = requests.get(url=f"{url}?name={name}", headers=headers) + integrations = response.json().get("results", []) + if len(integrations) == 1: + integration = integrations[0] + cloud_heartbeat, _ = CloudHeartbeat.objects.update_or_create( + defaults={ + "integration_id": integration["id"], + "integration_url": integration["heartbeat"]["link"], + } + ) + else: + setup_heartbeat_integration(f"{name} { random.randint(1, 1024)}") + except requests.Timeout: + logger.warning("Unable to create cloud heartbeat integration. Request timeout.") + except requests.exceptions.RequestException as e: + logger.warning(f"Unable to create cloud heartbeat integration. Request exception {str(e)}.") + return cloud_heartbeat + + +def send_cloud_heartbeat(): + CloudHeartbeat = apps.get_model("oss_installation", "CloudHeartbeat") + CloudConnector = apps.get_model("oss_installation", "CloudConnector") + """Send heartbeat to Grafana Cloud OnCall integration.""" + if not live_settings.GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED or not live_settings.GRAFANA_CLOUD_ONCALL_TOKEN: + logger.info( + "Unable to send cloud heartbeat. Check values for GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED and GRAFANA_CLOUD_ONCALL_TOKEN." 
+ ) + return + connector = CloudConnector.objects.first() + if connector is None: + logger.info("Unable to send cloud heartbeat. Cloud is not connected") + return + logger.info("Start send cloud heartbeat") + try: + cloud_heartbeat = CloudHeartbeat.objects.get() + except CloudHeartbeat.DoesNotExist: + cloud_heartbeat = setup_heartbeat_integration() + + if cloud_heartbeat is None: + logger.warning("Unable to setup cloud heartbeat integration.") + return + cloud_heartbeat.success = False + try: + response = requests.get(cloud_heartbeat.integration_url, timeout=5) + logger.info(f"Send cloud heartbeat with response {response.status_code}") + except requests.Timeout: + logger.warning("Unable to send cloud heartbeat. Request timeout.") + except requests.exceptions.RequestException as e: + logger.warning(f"Unable to send cloud heartbeat. Request exception {str(e)}.") + else: + if response.status_code == status.HTTP_200_OK: + cloud_heartbeat.success = True + logger.info("Successfully send cloud heartbeat") + elif response.status_code == status.HTTP_403_FORBIDDEN: + # check for 403 because AlertChannelDefiningMixin returns 403 if no integration was found. + logger.info("Failed to send cloud heartbeat. Integration was not created yet") + # force re-creation on next run + cloud_heartbeat.delete() + else: + logger.info(f"Failed to send cloud heartbeat. 
response {response.status_code}") + # save result of cloud heartbeat if it wasn't deleted + if cloud_heartbeat.pk is not None: + cloud_heartbeat.save() + logger.info("Finish send cloud heartbeat") + + +def get_heartbeat_link(connector, heartbeat): + if connector is None: + return None + if heartbeat is None: + return None + return urljoin(connector.cloud_url, f"a/grafana-oncall-app/?page=integrations&id={heartbeat.integration_id}") diff --git a/engine/apps/oss_installation/constants.py b/engine/apps/oss_installation/constants.py new file mode 100644 index 00000000..11f3dc48 --- /dev/null +++ b/engine/apps/oss_installation/constants.py @@ -0,0 +1,4 @@ +CLOUD_NOT_SYNCED = 0 +CLOUD_SYNCED_USER_NOT_FOUND = 1 +CLOUD_SYNCED_PHONE_NOT_VERIFIED = 2 +CLOUD_SYNCED_PHONE_VERIFIED = 3 diff --git a/engine/apps/oss_installation/migrations/0001_squashed_initial.py b/engine/apps/oss_installation/migrations/0001_squashed_initial.py index dac55f47..b1a34cbd 100644 --- a/engine/apps/oss_installation/migrations/0001_squashed_initial.py +++ b/engine/apps/oss_installation/migrations/0001_squashed_initial.py @@ -30,4 +30,20 @@ class Migration(migrations.Migration): ('report_sent_at', models.DateTimeField(default=None, null=True)), ], ), + migrations.CreateModel( + name='CloudConnector', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('cloud_url', models.URLField()), + ], + ), + migrations.CreateModel( + name='CloudUserIdentity', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('phone_number_verified', models.BooleanField(default=False)), + ('cloud_id', models.CharField(max_length=20)), + ('email', models.EmailField(max_length=254)), + ], + ), ] diff --git a/engine/apps/oss_installation/models/__init__.py b/engine/apps/oss_installation/models/__init__.py index 53dea35e..beab1774 100644 --- a/engine/apps/oss_installation/models/__init__.py +++ 
b/engine/apps/oss_installation/models/__init__.py @@ -1,2 +1,4 @@ -from .heartbeat import CloudHeartbeat # noqa: F401 +from .cloud_connector import CloudConnector # noqa: F401 +from .cloud_heartbeat import CloudHeartbeat # noqa: F401 +from .cloud_user_identity import CloudUserIdentity # noqa: F401 from .oss_installation import OssInstallation # noqa: F401 diff --git a/engine/apps/oss_installation/models/cloud_connector.py b/engine/apps/oss_installation/models/cloud_connector.py new file mode 100644 index 00000000..38541bf5 --- /dev/null +++ b/engine/apps/oss_installation/models/cloud_connector.py @@ -0,0 +1,155 @@ +import logging +from urllib.parse import urljoin + +import requests +from django.db import models, transaction + +from apps.base.utils import live_settings +from apps.oss_installation.models.cloud_user_identity import CloudUserIdentity +from apps.user_management.models import User +from common.constants.role import Role +from settings.base import GRAFANA_CLOUD_ONCALL_API_URL + +logger = logging.getLogger(__name__) + + +class CloudConnector(models.Model): + """ + CloudOrganizationConnector model represents connection between oss organization and cloud organization. + """ + + cloud_url = models.URLField() + + @classmethod + def sync_with_cloud(cls, token=None): + """ + sync_with_cloud sync organization with cloud organization defined by provided GRAFANA_CLOUD_ONCALL_TOKEN. + """ + sync_status = False + error_msg = None + + api_token = token or live_settings.GRAFANA_CLOUD_ONCALL_TOKEN + if api_token is None: + logger.warning("Unable to sync with cloud. 
GRAFANA_CLOUD_ONCALL_TOKEN is not set") + error_msg = "GRAFANA_CLOUD_ONCALL_TOKEN is not set" + else: + info_url = urljoin(GRAFANA_CLOUD_ONCALL_API_URL, "api/v1/info/") + try: + r = requests.get(info_url, headers={"AUTHORIZATION": api_token}, timeout=5) + if r.status_code == 200: + connector, _ = cls.objects.get_or_create() + connector.cloud_url = r.json()["url"] + connector.save() + elif r.status_code == 403: + logger.warning("Unable to sync with cloud. GRAFANA_CLOUD_ONCALL_TOKEN is invalid") + error_msg = "Invalid token" + else: + error_msg = f"Non-200 HTTP code. Got {r.status_code}" + except requests.exceptions.RequestException as e: + logger.warning(f"Unable to sync with cloud. Request exception {str(e)}") + error_msg = f"Unable to sync with cloud" + + return sync_status, error_msg + + def sync_users_with_cloud(self) -> tuple[bool, str]: + sync_status = False + error_msg = None + + api_token = live_settings.GRAFANA_CLOUD_ONCALL_TOKEN + if api_token is None: + logger.warning("Unable to sync with cloud. GRAFANA_CLOUD_ONCALL_TOKEN is not set") + error_msg = "GRAFANA_CLOUD_ONCALL_TOKEN is not set" + + existing_emails = list(User.objects.filter(role__in=(Role.ADMIN, Role.EDITOR)).values_list("email", flat=True)) + matching_users = [] + users_url = urljoin(GRAFANA_CLOUD_ONCALL_API_URL, "api/v1/users") + + fetch_next_page = True + users_fetched = True + page = 1 + while fetch_next_page: + try: + url = urljoin(users_url, f"?page={page}&?short=true") + r = requests.get(url, headers={"AUTHORIZATION": api_token}, timeout=5) + if r.status_code != 200: + logger.warning( + f"Unable to fetch page {page} while sync_users_with_cloud. Response status code {r.status_code}" + ) + error_msg = f"Non-200 HTTP code. 
Got {r.status_code}" + users_fetched = False + break + data = r.json() + matching_users.extend(list(filter(lambda u: (u["email"] in existing_emails), data["results"]))) + page += 1 + if data["next"] is None: + fetch_next_page = False + except requests.exceptions.RequestException as e: + logger.warning(f"Unable to sync users with cloud. Request exception {str(e)}") + error_msg = f"Unable to sync with cloud" + users_fetched = False + break + + if users_fetched: + with transaction.atomic(): + cloud_users_identities_to_create = [] + for user in matching_users: + cloud_users_identities_to_create.append( + CloudUserIdentity( + cloud_id=user["id"], + email=user["email"], + phone_number_verified=user["is_phone_number_verified"], + ) + ) + + CloudUserIdentity.objects.all().delete() + CloudUserIdentity.objects.bulk_create(cloud_users_identities_to_create, batch_size=1000) + sync_status = True + return sync_status, error_msg + + def sync_user_with_cloud(self, user): + sync_status = False + error_msg = None + + api_token = live_settings.GRAFANA_CLOUD_ONCALL_TOKEN + if api_token is None: + logger.warning(f"Unable to sync_user_with cloud user_id {user.id}. GRAFANA_CLOUD_ONCALL_TOKEN is not set") + error_msg = "GRAFANA_CLOUD_ONCALL_TOKEN is not set" + else: + url = urljoin(GRAFANA_CLOUD_ONCALL_API_URL, f"api/v1/users/?email={user.email}") + try: + r = requests.get(url, headers={"AUTHORIZATION": api_token}, timeout=5) + if r.status_code != 200: + logger.warning( + f"Unable to sync_user_with_cloud user_id {user.id}. Response status code {r.status_code}" + ) + error_msg = f"Non-200 HTTP code. 
Got {r.status_code}" + else: + data = r.json() + if len(data["results"]) != 0: + cloud_used_data = data["results"][0] + with transaction.atomic(): + CloudUserIdentity.objects.filter(email=user.email).delete() + CloudUserIdentity.objects.create( + email=user.email, + phone_number_verified=cloud_used_data["is_phone_number_verified"], + cloud_id=cloud_used_data["id"], + ) + sync_status = True + else: + logger.warning( + f"Unable to sync_user_with_cloud user_id {user.id}. User with {user.email} not found" + ) + error_msg = f"User with email not found {user.email}" + except requests.exceptions.RequestException as e: + logger.warning(f"Unable to sync_user_with cloud user_id {user.id}. Request exception {str(e)}") + error_msg = f"Unable to sync with cloud" + + return sync_status, error_msg + + @classmethod + def remove_sync(cls): + from apps.oss_installation.models import CloudHeartbeat + + cls.objects.all().delete() + CloudUserIdentity.objects.all().delete() + CloudHeartbeat.objects.all().delete() diff --git a/engine/apps/oss_installation/models/heartbeat.py b/engine/apps/oss_installation/models/cloud_heartbeat.py similarity index 100% rename from engine/apps/oss_installation/models/heartbeat.py rename to engine/apps/oss_installation/models/cloud_heartbeat.py diff --git a/engine/apps/oss_installation/models/cloud_user_identity.py b/engine/apps/oss_installation/models/cloud_user_identity.py new file mode 100644 index 00000000..ec83ac2f --- /dev/null +++ b/engine/apps/oss_installation/models/cloud_user_identity.py @@ -0,0 +1,7 @@ +from django.db import models + + +class CloudUserIdentity(models.Model): + phone_number_verified = models.BooleanField(default=False) + cloud_id = models.CharField(max_length=20) + email = models.EmailField() diff --git a/engine/apps/oss_installation/models/oss_installation.py b/engine/apps/oss_installation/models/oss_installation.py index 9e4dd3dd..2e553fcf 100644 --- a/engine/apps/oss_installation/models/oss_installation.py +++ 
b/engine/apps/oss_installation/models/oss_installation.py @@ -1,9 +1,16 @@ +import logging import uuid from django.db import models +logger = logging.getLogger(__name__) + class OssInstallation(models.Model): + """ + OssInstallation is model to track installation of OSS OnCall version. + """ + installation_id = models.UUIDField(default=uuid.uuid4, editable=False) created_at = models.DateTimeField(auto_now=True) report_sent_at = models.DateTimeField(null=True, default=None) diff --git a/engine/apps/oss_installation/serializers/__init__.py b/engine/apps/oss_installation/serializers/__init__.py new file mode 100644 index 00000000..991cf99b --- /dev/null +++ b/engine/apps/oss_installation/serializers/__init__.py @@ -0,0 +1 @@ +from .cloud_user import CloudUserSerializer # noqa: F401 diff --git a/engine/apps/oss_installation/serializers/cloud_user.py b/engine/apps/oss_installation/serializers/cloud_user.py new file mode 100644 index 00000000..53ccd808 --- /dev/null +++ b/engine/apps/oss_installation/serializers/cloud_user.py @@ -0,0 +1,20 @@ +from rest_framework import serializers + +from apps.oss_installation.models import CloudConnector, CloudUserIdentity +from apps.oss_installation.utils import cloud_user_identity_status +from apps.user_management.models import User + + +class CloudUserSerializer(serializers.ModelSerializer): + cloud_data = serializers.SerializerMethodField() + + class Meta: + model = User + fields = ["cloud_data"] + + def get_cloud_data(self, obj): + connector = CloudConnector.objects.filter().first() + cloud_user_identity = CloudUserIdentity.objects.filter(email=obj.email).first() + status, link = cloud_user_identity_status(connector, cloud_user_identity) + cloud_data = {"status": status, "link": link} + return cloud_data diff --git a/engine/apps/oss_installation/tasks.py b/engine/apps/oss_installation/tasks.py index 2c11a54a..56e3678a 100644 --- a/engine/apps/oss_installation/tasks.py +++ b/engine/apps/oss_installation/tasks.py @@ -1,13 +1,9 @@ 
-from urllib.parse import urljoin - -import requests from celery.utils.log import get_task_logger -from django.conf import settings +from django.apps import apps from django.utils import timezone -from rest_framework import status from apps.base.utils import live_settings -from apps.oss_installation.models import CloudHeartbeat, OssInstallation +from apps.oss_installation.cloud_heartbeat import send_cloud_heartbeat from apps.oss_installation.usage_stats import UsageStatsService from common.custom_celery_tasks import shared_dedicated_queue_retry_task @@ -17,6 +13,8 @@ logger = get_task_logger(__name__) @shared_dedicated_queue_retry_task() def send_usage_stats_report(): logger.info("Start send_usage_stats_report") + OssInstallation = apps.get_model("oss_installation", "OssInstallation") + installation = OssInstallation.objects.get_or_create()[0] enabled = live_settings.SEND_ANONYMOUS_USAGE_STATS if enabled: @@ -30,66 +28,24 @@ def send_usage_stats_report(): logger.info("Finish send_usage_stats_report") -def _setup_heartbeat_integration(): - """Setup Grafana Cloud OnCall heartbeat integration.""" - cloud_heartbeat = None - api_token = live_settings.GRAFANA_CLOUD_ONCALL_TOKEN - # don't specify a team in the data, so heartbeat integration will be created in the General. - data = {"type": "formatted_webhook", "name": f"OnCall {settings.BASE_URL}"} - url = urljoin(settings.GRAFANA_CLOUD_ONCALL_API_URL, "/api/v1/integrations/") - try: - headers = {"Authorization": api_token} - r = requests.post(url=url, data=data, headers=headers, timeout=5) - if r.status_code == status.HTTP_201_CREATED: - response_data = r.json() - cloud_heartbeat, _ = CloudHeartbeat.objects.update_or_create( - defaults={"integration_id": response_data["id"], "integration_url": response_data["heartbeat"]["link"]} - ) - except requests.Timeout: - logger.warning("Unable to create cloud heartbeat integration. 
Request timeout.") - except requests.exceptions.RequestException as e: - logger.warning(f"Unable to create cloud heartbeat integration. Request exception {str(e)}.") - return cloud_heartbeat +@shared_dedicated_queue_retry_task() +def send_cloud_heartbeat_task(): + send_cloud_heartbeat() @shared_dedicated_queue_retry_task() -def send_cloud_heartbeat(): - """Send heartbeat to Grafana Cloud OnCall integration.""" - if not live_settings.GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED or not live_settings.GRAFANA_CLOUD_ONCALL_TOKEN: - logger.info( - "Unable to send cloud heartbeat. Check values for GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED and GRAFANA_CLOUD_ONCALL_TOKEN." - ) - return - - logger.info("Start send cloud heartbeat") - try: - cloud_heartbeat = CloudHeartbeat.objects.get() - except CloudHeartbeat.DoesNotExist: - cloud_heartbeat = _setup_heartbeat_integration() - - if cloud_heartbeat is None: - logger.warning("Unable to setup cloud heartbeat integration.") - return - cloud_heartbeat.success = False - try: - response = requests.get(cloud_heartbeat.integration_url, timeout=5) - logger.info(f"Send cloud heartbeat with response {response.status_code}") - except requests.Timeout: - logger.warning("Unable to send cloud heartbeat. Request timeout.") - except requests.exceptions.RequestException as e: - logger.warning(f"Unable to send cloud heartbeat. Request exception {str(e)}.") - else: - if response.status_code == status.HTTP_200_OK: - cloud_heartbeat.success = True - logger.info("Successfully send cloud heartbeat") - elif response.status_code == status.HTTP_403_FORBIDDEN: - # check for 403 because AlertChannelDefiningMixin returns 403 if no integration was found. - logger.info("Failed to send cloud heartbeat. 
Integration was not created yet") - # force re-creation on next run - cloud_heartbeat.delete() +def sync_users_with_cloud(): + CloudConnector = apps.get_model("oss_installation", "CloudConnector") + logger.info("Start sync_users_with_cloud") + if live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED: + connector = CloudConnector.objects.first() + if connector is not None: + status, error = connector.sync_users_with_cloud() + log_message = "Users synced. Status {status}." + if error: + log_message += f" Error {error}" + logger.info(log_message) else: - logger.info(f"Failed to send cloud heartbeat. response {response.status_code}") - # save result of cloud heartbeat if it wasn't deleted - if cloud_heartbeat.pk is not None: - cloud_heartbeat.save() - logger.info("Finish send cloud heartbeat") + logger.info("Grafana Cloud is not connected") + else: + logger.info("GRAFANA_CLOUD_NOTIFICATIONS_ENABLED is not enabled") diff --git a/engine/apps/oss_installation/urls.py b/engine/apps/oss_installation/urls.py index 956ffe74..ddf04020 100644 --- a/engine/apps/oss_installation/urls.py +++ b/engine/apps/oss_installation/urls.py @@ -1,7 +1,15 @@ -from common.api_helpers.optional_slash_router import optional_slash_path +from django.urls import include, path -from .views import CloudHeartbeatStatusView +from common.api_helpers.optional_slash_router import OptionalSlashRouter, optional_slash_path + +from .views import CloudConnectionView, CloudHeartbeatView, CloudUsersView, CloudUserView + +router = OptionalSlashRouter() +router.register("cloud_users", CloudUserView, basename="cloud-users") urlpatterns = [ - optional_slash_path("cloud_heartbeat_status", CloudHeartbeatStatusView.as_view(), name="cloud_heartbeat_status"), + path("", include(router.urls)), + optional_slash_path("cloud_users", CloudUsersView.as_view(), name="cloud-users-list"), + optional_slash_path("cloud_connection", CloudConnectionView.as_view(), name="cloud-connection-status"), + optional_slash_path("cloud_heartbeat", 
CloudHeartbeatView.as_view(), name="cloud-heartbeat"), ] diff --git a/engine/apps/oss_installation/usage_stats.py b/engine/apps/oss_installation/usage_stats.py index db90cce8..b3a1bd43 100644 --- a/engine/apps/oss_installation/usage_stats.py +++ b/engine/apps/oss_installation/usage_stats.py @@ -3,11 +3,11 @@ import platform from dataclasses import asdict, dataclass import requests +from django.apps import apps from django.conf import settings from django.db.models import Sum from apps.alerts.models import AlertGroupCounter -from apps.oss_installation.models import OssInstallation from apps.oss_installation.utils import active_oss_users_count USAGE_STATS_URL = "https://stats.grafana.org/oncall-usage-report" @@ -27,9 +27,12 @@ class UsageStatsReport: class UsageStatsService: def get_usage_stats_report(self): + OssInstallation = apps.get_model("oss_installation", "OssInstallation") metrics = {} metrics["active_users_count"] = active_oss_users_count() - total_alert_groups = AlertGroupCounter.objects.aggregate(Sum("value")).get("value__sum", 0) + total_alert_groups = AlertGroupCounter.objects.aggregate(Sum("value")).get("value__sum", None) + if total_alert_groups is None: + total_alert_groups = 0 metrics["alert_groups_count"] = total_alert_groups usage_stats_id = OssInstallation.objects.get_or_create()[0].installation_id diff --git a/engine/apps/oss_installation/utils.py b/engine/apps/oss_installation/utils.py index fcfb537c..4aad084a 100644 --- a/engine/apps/oss_installation/utils.py +++ b/engine/apps/oss_installation/utils.py @@ -1,19 +1,23 @@ -from contextlib import suppress +import logging +from urllib.parse import urljoin +from django.apps import apps from django.utils import timezone -from apps.alerts.models import AlertGroupLogRecord, EscalationPolicy -from apps.base.models import UserNotificationPolicyLogRecord -from apps.public_api.constants import DEMO_USER_ID +from apps.oss_installation import constants as oss_constants from apps.schedules.ical_utils import 
list_users_to_notify_from_ical_for_period -from apps.schedules.models import OnCallSchedule -from apps.user_management.models import User + +logger = logging.getLogger(__name__) def active_oss_users_count(): """ active_oss_users_count returns count of active users of oss installation. """ + OnCallSchedule = apps.get_model("schedules", "OnCallSchedule") + AlertGroupLogRecord = apps.get_model("alerts", "AlertGroupLogRecord") + EscalationPolicy = apps.get_model("alerts", "EscalationPolicy") + UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord") # Take logs for previous 24 hours start = timezone.now() - timezone.timedelta(hours=24) @@ -62,9 +66,21 @@ def active_oss_users_count(): for user in users_from_schedule: unique_active_users.add(user.pk) - # Remove demo user from active users - with suppress(User.DoesNotExist): - demo_user = User.objects.get(public_primary_key=DEMO_USER_ID) - with suppress(KeyError): - unique_active_users.remove(demo_user.pk) return len(unique_active_users) + + +def cloud_user_identity_status(connector, identity): + link = None + if connector is None: + status = oss_constants.CLOUD_NOT_SYNCED + elif identity is None: + status = oss_constants.CLOUD_SYNCED_USER_NOT_FOUND + link = connector.cloud_url + else: + if identity.phone_number_verified: + status = oss_constants.CLOUD_SYNCED_PHONE_VERIFIED + else: + status = oss_constants.CLOUD_SYNCED_PHONE_NOT_VERIFIED + + link = urljoin(connector.cloud_url, f"a/grafana-oncall-app/?page=users&p=1&id={identity.cloud_id}") + return status, link diff --git a/engine/apps/oss_installation/views/__init__.py b/engine/apps/oss_installation/views/__init__.py index 0716482b..b3c50ba3 100644 --- a/engine/apps/oss_installation/views/__init__.py +++ b/engine/apps/oss_installation/views/__init__.py @@ -1 +1,3 @@ -from .cloud_heartbeat_status import CloudHeartbeatStatusView # noqa: F401 +from .cloud_connection import CloudConnectionView # noqa: F401 +from .cloud_heartbeat import 
CloudHeartbeatView # noqa: F401 +from .cloud_users import CloudUsersView, CloudUserView # noqa: F401 diff --git a/engine/apps/oss_installation/views/cloud_connection.py b/engine/apps/oss_installation/views/cloud_connection.py new file mode 100644 index 00000000..21b6624c --- /dev/null +++ b/engine/apps/oss_installation/views/cloud_connection.py @@ -0,0 +1,39 @@ +from rest_framework import status +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response +from rest_framework.views import APIView + +from apps.api.permissions import IsAdmin +from apps.auth_token.auth import PluginAuthentication +from apps.base.models import LiveSetting +from apps.base.utils import live_settings +from apps.oss_installation.cloud_heartbeat import get_heartbeat_link +from apps.oss_installation.models import CloudConnector, CloudHeartbeat + + +class CloudConnectionView(APIView): + authentication_classes = (PluginAuthentication,) + permission_classes = (IsAuthenticated, IsAdmin) + + def get(self, request): + connector = CloudConnector.objects.first() + heartbeat = CloudHeartbeat.objects.first() + response = { + "cloud_connection_status": connector is not None, + "cloud_notifications_enabled": live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED, + "cloud_heartbeat_enabled": live_settings.GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED, + "cloud_heartbeat_link": get_heartbeat_link(connector, heartbeat), + "cloud_heartbeat_status": heartbeat is not None and heartbeat.success, + } + return Response(response) + + def delete(self, request): + s = LiveSetting.objects.filter(name="GRAFANA_CLOUD_ONCALL_TOKEN").first() + if s is not None: + s.value = None + s.save() + connector = CloudConnector.objects.first() + if connector is None: + return Response(status=status.HTTP_404_NOT_FOUND) + connector.remove_sync() + return Response(status=status.HTTP_204_NO_CONTENT) diff --git a/engine/apps/oss_installation/views/cloud_heartbeat.py 
b/engine/apps/oss_installation/views/cloud_heartbeat.py new file mode 100644 index 00000000..932087c3 --- /dev/null +++ b/engine/apps/oss_installation/views/cloud_heartbeat.py @@ -0,0 +1,27 @@ +from rest_framework import status +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response +from rest_framework.views import APIView + +from apps.api.permissions import IsAdmin +from apps.auth_token.auth import PluginAuthentication +from apps.oss_installation.cloud_heartbeat import get_heartbeat_link, setup_heartbeat_integration +from apps.oss_installation.models import CloudConnector, CloudHeartbeat + + +class CloudHeartbeatView(APIView): + authentication_classes = (PluginAuthentication,) + permission_classes = (IsAuthenticated, IsAdmin) + + def post(self, request): + connector = CloudConnector.objects.first() + if connector is not None: + try: + CloudHeartbeat.objects.get() + return Response(status=status.HTTP_400_BAD_REQUEST, data={"detail": "Cloud heartbeat already exists"}) + except CloudHeartbeat.DoesNotExist: + heartbeat = setup_heartbeat_integration() + link = get_heartbeat_link(connector, heartbeat) + return Response(status=status.HTTP_200_OK, data={"link": link}) + else: + return Response(status=status.HTTP_400_BAD_REQUEST, data={"detail": "Grafana Cloud is not connected"}) diff --git a/engine/apps/oss_installation/views/cloud_heartbeat_status.py b/engine/apps/oss_installation/views/cloud_heartbeat_status.py deleted file mode 100644 index be553641..00000000 --- a/engine/apps/oss_installation/views/cloud_heartbeat_status.py +++ /dev/null @@ -1,15 +0,0 @@ -from rest_framework.permissions import IsAuthenticated -from rest_framework.response import Response -from rest_framework.views import APIView - -from apps.auth_token.auth import PluginAuthentication -from apps.oss_installation.models import CloudHeartbeat - - -class CloudHeartbeatStatusView(APIView): - authentication_classes = (PluginAuthentication,) - 
permission_classes = (IsAuthenticated,) - - def get(self, request): - response = {"status": CloudHeartbeat.status()} - return Response(response) diff --git a/engine/apps/oss_installation/views/cloud_users.py b/engine/apps/oss_installation/views/cloud_users.py new file mode 100644 index 00000000..3eb7685b --- /dev/null +++ b/engine/apps/oss_installation/views/cloud_users.py @@ -0,0 +1,107 @@ +from collections import OrderedDict + +from rest_framework import mixins, status, viewsets +from rest_framework.decorators import action +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response +from rest_framework.views import APIView + +from apps.api.permissions import ActionPermission, AnyRole, IsAdmin, IsOwnerOrAdmin +from apps.auth_token.auth import PluginAuthentication +from apps.oss_installation.models import CloudConnector, CloudUserIdentity +from apps.oss_installation.serializers import CloudUserSerializer +from apps.oss_installation.utils import cloud_user_identity_status +from apps.user_management.models import User +from common.api_helpers.mixins import PublicPrimaryKeyMixin +from common.api_helpers.paginators import HundredPageSizePaginator +from common.constants.role import Role + + +class CloudUsersView(HundredPageSizePaginator, APIView): + authentication_classes = (PluginAuthentication,) + permission_classes = (IsAuthenticated, IsAdmin) + + def get(self, request): + organization = request.user.organization + + queryset = User.objects.filter(organization=organization, role__in=[Role.ADMIN, Role.EDITOR]) + + if request.user.current_team is not None: + queryset = queryset.filter(teams=request.user.current_team).distinct() + emails = list(queryset.values_list("email", flat=True)) + + results = self.paginate_queryset(queryset, request, view=self) + + cloud_identities = list(CloudUserIdentity.objects.filter(email__in=emails)) + cloud_identities = {cloud_identity.email: cloud_identity for cloud_identity in cloud_identities} 
+ + response = [] + + connector = CloudConnector.objects.first() + + for user in results: + cloud_identity = cloud_identities.get(user.email, None) + status, link = cloud_user_identity_status(connector, cloud_identity) + response.append( + { + "id": user.public_primary_key, + "email": user.email, + "username": user.username, + "cloud_data": {"status": status, "link": link}, + } + ) + + return self.get_paginated_response_with_matched_users_count(response, len(cloud_identities)) + + def get_paginated_response_with_matched_users_count(self, data, matched_users_count): + return Response( + OrderedDict( + [ + ("count", self.page.paginator.count), + ("matched_users_count", matched_users_count), + ("next", self.get_next_link()), + ("previous", self.get_previous_link()), + ("results", data), + ] + ) + ) + + def post(self, request): + connector = CloudConnector.objects.first() + if connector is not None: + sync_status, err = connector.sync_users_with_cloud() + return Response(status=status.HTTP_200_OK, data={"status": sync_status, "error": err}) + else: + return Response(status=status.HTTP_400_BAD_REQUEST, data={"detail": "Grafana Cloud is not connected"}) + + +class CloudUserView( + PublicPrimaryKeyMixin, + mixins.RetrieveModelMixin, + viewsets.GenericViewSet, +): + authentication_classes = (PluginAuthentication,) + permission_classes = (IsAuthenticated, ActionPermission) + + action_permissions = { + AnyRole: ("retrieve",), + IsAdmin: ("sync",), + } + action_object_permissions = { + IsOwnerOrAdmin: ("retrieve", "sync"), + } + serializer_class = CloudUserSerializer + + def get_queryset(self): + queryset = User.objects.filter(organization=self.request.user.organization) + return queryset + + @action(detail=True, methods=["post"]) + def sync(self, request, pk): + user = self.get_object() + connector = CloudConnector.objects.first() + if connector is not None: + sync_status, err = connector.sync_user_with_cloud(user) + return Response(status=status.HTTP_200_OK, data={"status": 
sync_status, "error": err}) + else: + return Response(status=status.HTTP_400_BAD_REQUEST, data={"detail": "Grafana Cloud is not connected"}) diff --git a/engine/apps/public_api/constants.py b/engine/apps/public_api/constants.py index 4a14df3f..cd2f6e38 100644 --- a/engine/apps/public_api/constants.py +++ b/engine/apps/public_api/constants.py @@ -1,69 +1,3 @@ from django.utils import dateparse -DEMO_USER_ID = "U4DNY931HHJS5" -DEMO_ORGANIZATION_ID = "TCNPY4A1BWUMP" -DEMO_SLACK_USER_ID = "UALEXSLACKDJPK" -DEMO_SLACK_TEAM_ID = "TALEXSLACKDJPK" -DEMO_AUTH_TOKEN = "meowmeowmeow" -DEMO_USER_USERNAME = "Alex" -DEMO_USER_EMAIL = "public-api-demo-user-1@amixr.io" -DEMO_INTEGRATION_ID = "CFRPV98RPR1U8" -DEMO_INTEGRATION_LINK_TOKEN = "mReAoNwDm0eMwKo1mTeTwYo" -DEMO_INTEGRATION_NAME = "Grafana :blush:" -DEMO_ROUTE_ID_1 = "RIYGUJXCPFHXY" -DEMO_ROUTE_ID_2 = "RVBE4RKQSCGJ2" -DEMO_SLACK_CHANNEL_FOR_ROUTE_ID = "CH23212D" -DEMO_ESCALATION_CHAIN_ID = "F5JU6KJET33FE" -DEMO_ESCALATION_POLICY_ID_1 = "E3GA6SJETWWJS" -DEMO_ESCALATION_POLICY_ID_2 = "E5JJTU52M5YM4" -DEMO_SCHEDULE_ID_ICAL = "SBM7DV7BKFUYU" -DEMO_SCHEDULE_ID_CALENDAR = "S3Z477AHDXTMF" -DEMO_SCHEDULE_NAME_ICAL = "Demo schedule iCal" -DEMO_SCHEDULE_NAME_CALENDAR = "Demo schedule Calendar" -DEMO_SCHEDULE_ICAL_URL_PRIMARY = "https://example.com/meow_calendar.ics" -DEMO_SCHEDULE_ICAL_URL_OVERRIDES = "https://example.com/meow_calendar_overrides.ics" -DEMO_INCIDENT_ID = "I68T24C13IFW1" -DEMO_INCIDENT_CREATED_AT = "2020-05-19T12:37:01.430444Z" -DEMO_INCIDENT_RESOLVED_AT = "2020-05-19T13:37:01.429805Z" -DEMO_ALERT_IDS = [ - ("AA74DN7T4JQB6", "2020-05-11T20:07:43Z"), - ("AR9SSYFKE2PV7", "2020-05-11T20:07:54Z"), - ("AWJQSGEYYUFGH", "2020-05-11T20:07:58Z"), -] -DEMO_ALERT_PAYLOAD = { - "evalMatches": [ - {"value": 100, "metric": "High value", "tags": None}, - {"value": 200, "metric": "Higher Value", "tags": None}, - ], - "message": "Someone is testing the alert notification within grafana.", - "ruleId": 0, - "ruleName": "Test 
notification", - "ruleUrl": "https://amixr.io/", - "state": "alerting", - "title": "[Alerting] Test notification", -} VALID_DATE_FOR_DELETE_INCIDENT = dateparse.parse_date("2020-07-04") -DEMO_SLACK_CHANNEL_NAME = "meow_channel" -DEMO_SLACK_CHANNEL_SLACK_ID = "MEOW_SLACK_ID" -DEMO_PERSONAL_NOTIFICATION_ID_1 = "NT79GA9I7E4DJ" -DEMO_PERSONAL_NOTIFICATION_ID_2 = "ND9EHN5LN1DUU" -DEMO_PERSONAL_NOTIFICATION_ID_3 = "NEF49YQ1HNPDD" -DEMO_PERSONAL_NOTIFICATION_ID_4 = "NWAL6WFJNWDD8" -DEMO_RESOLUTION_NOTE_ID = "M4BTQUS3PRHYQ" -DEMO_RESOLUTION_NOTE_TEXT = "Demo resolution note" -DEMO_RESOLUTION_NOTE_CREATED_AT = "2020-06-19T12:40:01.429805Z" -DEMO_RESOLUTION_NOTE_SOURCE = "web" -DEMO_CUSTOM_ACTION_ID = "KGEFG74LU1D8L" -DEMO_CUSTOM_ACTION_NAME = "Publish Incident To Jira" -DEMO_SLACK_USER_GROUP_ID = "GPFAPH7J7BKJB" -DEMO_SLACK_USER_GROUP_SLACK_ID = "MEOW_SLACK_ID" -DEMO_SLACK_USER_GROUP_NAME = "Meow Group" -DEMO_SLACK_USER_GROUP_HANDLE = "meow_group" -DEMO_ON_CALL_SHIFT_ID_1 = "OH3V5FYQEYJ6M" -DEMO_ON_CALL_SHIFT_ID_2 = "O9WTH7CKM3KZW" -DEMO_ON_CALL_SHIFT_NAME_1 = "Demo single event" -DEMO_ON_CALL_SHIFT_NAME_2 = "Demo recurrent event" -DEMO_ON_CALL_SHIFT_START_1 = "2020-09-10T08:00:00" -DEMO_ON_CALL_SHIFT_START_2 = "2020-09-10T16:00:00" -DEMO_ON_CALL_SHIFT_DURATION = 10800 -DEMO_ON_CALL_SHIFT_BY_DAY = ["MO", "WE", "FR"] diff --git a/engine/apps/public_api/helpers.py b/engine/apps/public_api/helpers.py index f684e34a..587445cb 100644 --- a/engine/apps/public_api/helpers.py +++ b/engine/apps/public_api/helpers.py @@ -1,14 +1,8 @@ -from apps.public_api.constants import DEMO_AUTH_TOKEN, VALID_DATE_FOR_DELETE_INCIDENT +from apps.public_api.constants import VALID_DATE_FOR_DELETE_INCIDENT from apps.slack.slack_client import SlackClientWithErrorHandling from apps.slack.slack_client.exceptions import SlackAPITokenException -def is_demo_token_request(request): - if DEMO_AUTH_TOKEN == request.headers.get("Authorization"): - return True - return False - - def 
team_has_slack_token_for_deleting(alert_group): if alert_group.slack_message and alert_group.slack_message.slack_team_identity: sc = SlackClientWithErrorHandling(alert_group.slack_message.slack_team_identity.bot_access_token) diff --git a/engine/apps/public_api/serializers/integrations.py b/engine/apps/public_api/serializers/integrations.py index 82d418c0..090523a2 100644 --- a/engine/apps/public_api/serializers/integrations.py +++ b/engine/apps/public_api/serializers/integrations.py @@ -4,8 +4,6 @@ from rest_framework import fields, serializers from apps.alerts.grafana_alerting_sync_manager.grafana_alerting_sync import GrafanaAlertingSyncManager from apps.alerts.models import AlertReceiveChannel -from apps.public_api.constants import DEMO_INTEGRATION_LINK_TOKEN -from apps.public_api.helpers import is_demo_token_request from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField from common.api_helpers.exceptions import BadRequest from common.api_helpers.mixins import EagerLoadingMixin @@ -62,12 +60,6 @@ class IntegrationSerializer(EagerLoadingMixin, serializers.ModelSerializer, Main default_route = self._get_default_route_iterative(instance) serializer = DefaultChannelFilterSerializer(default_route, context=self.context) result["default_route"] = serializer.data - if is_demo_token_request(self.context["request"]): - # Replace integration token to not receive alerts on demo integration - link = result["link"] - real_token = instance.token - link = link.replace(real_token, DEMO_INTEGRATION_LINK_TOKEN) - result["link"] = link return result diff --git a/engine/apps/public_api/serializers/schedules_base.py b/engine/apps/public_api/serializers/schedules_base.py index 80cd8bc5..8eed1cf8 100644 --- a/engine/apps/public_api/serializers/schedules_base.py +++ b/engine/apps/public_api/serializers/schedules_base.py @@ -2,8 +2,6 @@ from django.apps import apps from django.utils import timezone from rest_framework import serializers -from apps.public_api import 
constants as public_api_constants -from apps.public_api.helpers import is_demo_token_request from apps.schedules.ical_utils import list_users_to_notify_from_ical from apps.schedules.models import OnCallSchedule from apps.slack.models import SlackUserGroup @@ -36,14 +34,11 @@ class ScheduleBaseSerializer(serializers.ModelSerializer): raise BadRequest(detail="Schedule with this name already exists") def get_on_call_now(self, obj): - if not is_demo_token_request(self.context["request"]): - users_on_call = list_users_to_notify_from_ical(obj, timezone.datetime.now(timezone.utc)) - if users_on_call is not None: - return [user.public_primary_key for user in users_on_call] - else: - return [] + users_on_call = list_users_to_notify_from_ical(obj, timezone.datetime.now(timezone.utc)) + if users_on_call is not None: + return [user.public_primary_key for user in users_on_call] else: - return [public_api_constants.DEMO_USER_ID] + return [] def _correct_validated_data(self, validated_data): slack_field = validated_data.pop("slack", {}) diff --git a/engine/apps/public_api/tests/conftest.py b/engine/apps/public_api/tests/conftest.py index a4d11c26..f8b6f8b0 100644 --- a/engine/apps/public_api/tests/conftest.py +++ b/engine/apps/public_api/tests/conftest.py @@ -1,14 +1,7 @@ import pytest -from django.utils import dateparse, timezone from pytest_factoryboy import register -from apps.alerts.models import EscalationPolicy, ResolutionNote -from apps.auth_token.models import ApiAuthToken -from apps.base.models import UserNotificationPolicy -from apps.public_api import constants as public_api_constants -from apps.schedules.models import CustomOnCallShift, OnCallScheduleCalendar, OnCallScheduleICal from apps.user_management.tests.factories import OrganizationFactory, UserFactory -from common.constants.role import Role register(UserFactory) register(OrganizationFactory) @@ -22,222 +15,3 @@ def make_organization_and_user_with_token(make_organization_and_user, make_publi return organization, 
user, token return _make_organization_and_user_with_token - - -@pytest.fixture() -def make_organization_and_user_with_slack_identities_for_demo_token( - make_slack_team_identity, - make_organization, - make_slack_user_identity, - make_user, -): - def _make_organization_and_user_with_slack_identities_for_demo_token(): - slack_team_identity = make_slack_team_identity(slack_id=public_api_constants.DEMO_SLACK_TEAM_ID) - organization = make_organization( - slack_team_identity=slack_team_identity, public_primary_key=public_api_constants.DEMO_ORGANIZATION_ID - ) - slack_user_identity = make_slack_user_identity( - slack_id=public_api_constants.DEMO_SLACK_USER_ID, - slack_team_identity=slack_team_identity, - ) - user = make_user( - organization=organization, - public_primary_key=public_api_constants.DEMO_USER_ID, - email=public_api_constants.DEMO_USER_EMAIL, - username=public_api_constants.DEMO_USER_USERNAME, - role=Role.ADMIN, - slack_user_identity=slack_user_identity, - ) - ApiAuthToken.create_auth_token(user, organization, public_api_constants.DEMO_AUTH_TOKEN) - token = public_api_constants.DEMO_AUTH_TOKEN - return organization, user, token - - return _make_organization_and_user_with_slack_identities_for_demo_token - - -@pytest.fixture() -def make_data_for_demo_token( - make_alert_receive_channel, - make_channel_filter, - make_escalation_chain, - make_escalation_policy, - make_alert_group, - make_alert, - make_resolution_note, - make_custom_action, - make_slack_user_group, - make_schedule, - make_on_call_shift, - make_slack_channel, - make_user_notification_policy, -): - def _make_data_for_demo_token(organization, user): - alert_receive_channel = make_alert_receive_channel( - organization, - public_primary_key=public_api_constants.DEMO_INTEGRATION_ID, - verbal_name=public_api_constants.DEMO_INTEGRATION_NAME, - ) - route_1 = make_channel_filter( - public_primary_key=public_api_constants.DEMO_ROUTE_ID_1, - alert_receive_channel=alert_receive_channel, - 
slack_channel_id=public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID, - filtering_term="us-(east|west)", - order=0, - ) - make_channel_filter( - public_primary_key=public_api_constants.DEMO_ROUTE_ID_2, - alert_receive_channel=alert_receive_channel, - slack_channel_id=public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID, - filtering_term=".*", - order=1, - is_default=True, - ) - escalation_chain = make_escalation_chain( - organization, public_primary_key=public_api_constants.DEMO_ESCALATION_CHAIN_ID - ) - make_escalation_policy( - escalation_chain, - public_primary_key=public_api_constants.DEMO_ESCALATION_POLICY_ID_1, - escalation_policy_step=EscalationPolicy.STEP_WAIT, - order=0, - wait_delay=EscalationPolicy.ONE_MINUTE, - ) - escalation_policy_2 = make_escalation_policy( - escalation_chain, - public_primary_key=public_api_constants.DEMO_ESCALATION_POLICY_ID_2, - escalation_policy_step=EscalationPolicy.STEP_NOTIFY_USERS_QUEUE, - order=1, - ) - escalation_policy_2.notify_to_users_queue.add(user) - alert_group = make_alert_group( - alert_receive_channel, - public_primary_key=public_api_constants.DEMO_INCIDENT_ID, - resolved=True, - channel_filter=route_1, - ) - alert_group.started_at = dateparse.parse_datetime(public_api_constants.DEMO_INCIDENT_CREATED_AT) - alert_group.resolved_at = dateparse.parse_datetime(public_api_constants.DEMO_INCIDENT_RESOLVED_AT) - alert_group.save(update_fields=["started_at", "resolved_at"]) - for alert_id, created_at in public_api_constants.DEMO_ALERT_IDS: - alert = make_alert( - public_primary_key=alert_id, - alert_group=alert_group, - raw_request_data=public_api_constants.DEMO_ALERT_PAYLOAD, - ) - alert.created_at = dateparse.parse_datetime(created_at) - alert.save(update_fields=["created_at"]) - - resolution_note = make_resolution_note( - alert_group=alert_group, - source=ResolutionNote.Source.WEB, - author=user, - public_primary_key=public_api_constants.DEMO_RESOLUTION_NOTE_ID, - 
message_text=public_api_constants.DEMO_RESOLUTION_NOTE_TEXT, - ) - resolution_note.created_at = dateparse.parse_datetime(public_api_constants.DEMO_RESOLUTION_NOTE_CREATED_AT) - resolution_note.save(update_fields=["created_at"]) - - make_custom_action( - public_primary_key=public_api_constants.DEMO_CUSTOM_ACTION_ID, - organization=organization, - name=public_api_constants.DEMO_CUSTOM_ACTION_NAME, - ) - - user_group = make_slack_user_group( - public_primary_key=public_api_constants.DEMO_SLACK_USER_GROUP_ID, - name=public_api_constants.DEMO_SLACK_USER_GROUP_NAME, - handle=public_api_constants.DEMO_SLACK_USER_GROUP_HANDLE, - slack_id=public_api_constants.DEMO_SLACK_USER_GROUP_SLACK_ID, - slack_team_identity=organization.slack_team_identity, - ) - - # ical schedule - make_schedule( - organization=organization, - schedule_class=OnCallScheduleICal, - public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_ICAL, - ical_url_primary=public_api_constants.DEMO_SCHEDULE_ICAL_URL_PRIMARY, - ical_url_overrides=public_api_constants.DEMO_SCHEDULE_ICAL_URL_OVERRIDES, - name=public_api_constants.DEMO_SCHEDULE_NAME_ICAL, - channel=public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID, - user_group=user_group, - ) - # calendar schedule - schedule_calendar = make_schedule( - organization=organization, - schedule_class=OnCallScheduleCalendar, - public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_CALENDAR, - name=public_api_constants.DEMO_SCHEDULE_NAME_CALENDAR, - channel=public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID, - user_group=user_group, - time_zone="America/New_york", - ) - - on_call_shift_1 = make_on_call_shift( - shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT, - organization=organization, - public_primary_key=public_api_constants.DEMO_ON_CALL_SHIFT_ID_1, - name=public_api_constants.DEMO_ON_CALL_SHIFT_NAME_1, - start=dateparse.parse_datetime(public_api_constants.DEMO_ON_CALL_SHIFT_START_1), - duration=timezone.timedelta(seconds=public_api_constants.DEMO_ON_CALL_SHIFT_DURATION), 
- ) - on_call_shift_1.users.add(user) - - on_call_shift_2 = make_on_call_shift( - shift_type=CustomOnCallShift.TYPE_RECURRENT_EVENT, - organization=organization, - public_primary_key=public_api_constants.DEMO_ON_CALL_SHIFT_ID_2, - name=public_api_constants.DEMO_ON_CALL_SHIFT_NAME_2, - start=dateparse.parse_datetime(public_api_constants.DEMO_ON_CALL_SHIFT_START_2), - duration=timezone.timedelta(seconds=public_api_constants.DEMO_ON_CALL_SHIFT_DURATION), - frequency=CustomOnCallShift.FREQUENCY_WEEKLY, - interval=2, - by_day=public_api_constants.DEMO_ON_CALL_SHIFT_BY_DAY, - source=CustomOnCallShift.SOURCE_TERRAFORM, - ) - on_call_shift_2.users.add(user) - - schedule_calendar.custom_on_call_shifts.add(on_call_shift_1) - schedule_calendar.custom_on_call_shifts.add(on_call_shift_2) - - make_slack_channel( - organization.slack_team_identity, - slack_id=public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID, - name=public_api_constants.DEMO_SLACK_CHANNEL_NAME, - ) - make_user_notification_policy( - public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1, - important=False, - user=user, - notify_by=UserNotificationPolicy.NotificationChannel.SMS, - step=UserNotificationPolicy.Step.NOTIFY, - order=0, - ) - make_user_notification_policy( - public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_2, - important=False, - user=user, - step=UserNotificationPolicy.Step.WAIT, - wait_delay=UserNotificationPolicy.FIVE_MINUTES, - order=1, - ) - make_user_notification_policy( - public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_3, - important=False, - user=user, - step=UserNotificationPolicy.Step.NOTIFY, - notify_by=UserNotificationPolicy.NotificationChannel.PHONE_CALL, - order=2, - ) - - make_user_notification_policy( - public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_4, - important=True, - user=user, - step=UserNotificationPolicy.Step.NOTIFY, - notify_by=UserNotificationPolicy.NotificationChannel.PHONE_CALL, - order=0, - ) 
- return - - return _make_data_for_demo_token diff --git a/engine/apps/public_api/tests/test_demo_token/__init__.py b/engine/apps/public_api/tests/test_demo_token/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/engine/apps/public_api/tests/test_demo_token/test_alerts.py b/engine/apps/public_api/tests/test_demo_token/test_alerts.py deleted file mode 100644 index 4153ca2b..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_alerts.py +++ /dev/null @@ -1,110 +0,0 @@ -import pytest -from django.urls import reverse -from rest_framework import status -from rest_framework.test import APIClient - -from apps.public_api import constants as public_api_constants - -demo_alerts_results = [] -for alert_id, created_at in public_api_constants.DEMO_ALERT_IDS: - demo_alerts_results.append( - { - "id": alert_id, - "alert_group_id": public_api_constants.DEMO_INCIDENT_ID, - "created_at": created_at, - "payload": { - "state": "alerting", - "title": "[Alerting] Test notification", - "ruleId": 0, - "message": "Someone is testing the alert notification within grafana.", - "ruleUrl": "https://amixr.io/", - "ruleName": "Test notification", - "evalMatches": [ - {"tags": None, "value": 100, "metric": "High value"}, - {"tags": None, "value": 200, "metric": "Higher Value"}, - ], - }, - } - ) - -# https://api-docs.amixr.io/#list-alerts -demo_alerts_payload = {"count": 3, "next": None, "previous": None, "results": demo_alerts_results} - - -@pytest.mark.django_db -def test_get_alerts( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:alerts-list") - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == 
demo_alerts_payload - - -@pytest.mark.django_db -def test_get_alerts_filter_by_incident( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:alerts-list") - response = client.get( - url + f"?alert_group_id={public_api_constants.DEMO_INCIDENT_ID}", format="json", HTTP_AUTHORIZATION=token - ) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_alerts_payload - - -@pytest.mark.django_db -def test_get_alerts_filter_by_incident_no_results( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:alerts-list") - response = client.get(url + "?alert_group_id=impossible_alert_group_id", format="json", HTTP_AUTHORIZATION=token) - assert response.status_code == status.HTTP_200_OK - assert response.data["results"] == [] - - -@pytest.mark.django_db -def test_get_alerts_search( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:alerts-list") - response = client.get(url + "?search=evalMatches", format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_alerts_payload - - -@pytest.mark.django_db -def test_get_alerts_search_no_results( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, 
token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:alerts-list") - response = client.get(url + "?search=impossible_payload", format="json", HTTP_AUTHORIZATION=token) - assert response.status_code == status.HTTP_200_OK - assert response.data["results"] == [] diff --git a/engine/apps/public_api/tests/test_demo_token/test_custom_actions.py b/engine/apps/public_api/tests/test_demo_token/test_custom_actions.py deleted file mode 100644 index 6cf21903..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_custom_actions.py +++ /dev/null @@ -1,32 +0,0 @@ -import pytest -from django.urls import reverse -from rest_framework import status -from rest_framework.test import APIClient - -from apps.public_api import constants as public_api_constants - -demo_custom_action_payload = { - "id": public_api_constants.DEMO_CUSTOM_ACTION_ID, - "name": public_api_constants.DEMO_CUSTOM_ACTION_NAME, - "team_id": None, -} - -demo_custom_action_payload_list = {"count": 1, "next": None, "previous": None, "results": [demo_custom_action_payload]} - - -@pytest.mark.django_db -def test_demo_get_custom_actions_list( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:actions-list") - - response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_200_OK - assert response.data == demo_custom_action_payload_list diff --git a/engine/apps/public_api/tests/test_demo_token/test_escalation_policies.py b/engine/apps/public_api/tests/test_demo_token/test_escalation_policies.py deleted file mode 100644 index 4df862b6..00000000 --- 
a/engine/apps/public_api/tests/test_demo_token/test_escalation_policies.py +++ /dev/null @@ -1,169 +0,0 @@ -import pytest -from django.urls import reverse -from django.utils import timezone -from rest_framework import status -from rest_framework.test import APIClient - -from apps.alerts.models import EscalationPolicy -from apps.public_api import constants as public_api_constants - -# https://api-docs.amixr.io/#get-escalation-policy -demo_escalation_policy_payload = { - "id": public_api_constants.DEMO_ESCALATION_POLICY_ID_1, - "escalation_chain_id": public_api_constants.DEMO_ESCALATION_CHAIN_ID, - "position": 0, - "type": "wait", - "duration": timezone.timedelta(seconds=60).seconds, -} - -# https://api-docs.amixr.io/#list-escalation-policies -demo_escalation_policies_payload = { - "count": 2, - "next": None, - "previous": None, - "results": [ - { - "id": public_api_constants.DEMO_ESCALATION_POLICY_ID_1, - "escalation_chain_id": public_api_constants.DEMO_ESCALATION_CHAIN_ID, - "position": 0, - "type": "wait", - "duration": timezone.timedelta(seconds=60).seconds, - }, - { - "id": public_api_constants.DEMO_ESCALATION_POLICY_ID_2, - "escalation_chain_id": public_api_constants.DEMO_ESCALATION_CHAIN_ID, - "position": 1, - "type": "notify_person_next_each_time", - "persons_to_notify_next_each_time": ["U4DNY931HHJS5"], - }, - ], -} - - -@pytest.mark.django_db -def test_get_escalation_policies( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:escalation_policies-list") - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_escalation_policies_payload - - -@pytest.mark.django_db -def test_get_escalation_policies_filter_by_route( - 
make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:escalation_policies-list") - response = client.get( - url + f"?route_id={public_api_constants.DEMO_ROUTE_ID_1}", format="json", HTTP_AUTHORIZATION=token - ) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_escalation_policies_payload - - -@pytest.mark.django_db -def test_create_escalation_policy( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - data_for_create = { - "escalation_chain_id": public_api_constants.DEMO_ESCALATION_CHAIN_ID, - "type": "notify_person_next_each_time", - "position": 0, - "persons_to_notify_next_each_time": [user.public_primary_key], - } - url = reverse("api-public:escalation_policies-list") - response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_201_CREATED - # check on nothing change - assert response.json() == demo_escalation_policy_payload - - -@pytest.mark.django_db -def test_invalid_step_type( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - data_for_create = { - "escalation_chain_id": public_api_constants.DEMO_ESCALATION_CHAIN_ID, - "type": "this_is_invalid_step_type", # invalid step type - "position": 0, - "persons_to_notify_next_each_time": [user.public_primary_key], - } - url = 
reverse("api-public:escalation_policies-list") - response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_201_CREATED - # check on nothing change - assert response.json() == demo_escalation_policy_payload - - -@pytest.mark.django_db -def test_update_escalation_step( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - data_for_update = { - "route_id": public_api_constants.DEMO_ROUTE_ID_1, - "type": "notify_person_next_each_time", - "position": 1, - "persons_to_notify_next_each_time": [user.public_primary_key], - } - url = reverse( - "api-public:escalation_policies-detail", kwargs={"pk": public_api_constants.DEMO_ESCALATION_POLICY_ID_1} - ) - response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - # check on nothing change - assert response.json() == demo_escalation_policy_payload - - -@pytest.mark.django_db -def test_delete_escalation_policy( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - escalation_policy = EscalationPolicy.objects.get( - public_primary_key=public_api_constants.DEMO_ESCALATION_POLICY_ID_1 - ) - - url = reverse("api-public:escalation_policies-detail", args=[escalation_policy.public_primary_key]) - response = client.delete(url, format="json", HTTP_AUTHORIZATION=token) - - escalation_policy.refresh_from_db() - - assert response.status_code == status.HTTP_204_NO_CONTENT - # check on nothing change - escalation_policy.refresh_from_db() - assert 
escalation_policy is not None diff --git a/engine/apps/public_api/tests/test_demo_token/test_incidents.py b/engine/apps/public_api/tests/test_demo_token/test_incidents.py deleted file mode 100644 index 26aa3b1a..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_incidents.py +++ /dev/null @@ -1,82 +0,0 @@ -import pytest -from django.urls import reverse -from rest_framework import status -from rest_framework.test import APIClient - -from apps.alerts.models import AlertGroup -from apps.public_api import constants as public_api_constants - -demo_incidents_payload = { - "count": 1, - "next": None, - "previous": None, - "results": [ - { - "id": public_api_constants.DEMO_INCIDENT_ID, - "integration_id": public_api_constants.DEMO_INTEGRATION_ID, - "route_id": public_api_constants.DEMO_ROUTE_ID_1, - "alerts_count": 3, - "state": "resolved", - "created_at": public_api_constants.DEMO_INCIDENT_CREATED_AT, - "resolved_at": public_api_constants.DEMO_INCIDENT_RESOLVED_AT, - "acknowledged_at": None, - "title": None, - } - ], -} - - -@pytest.mark.django_db -def test_create_incidents( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:alert_groups-list") - response = client.post(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED - - -@pytest.mark.django_db -def test_get_incidents( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:alert_groups-list") - response = client.get(url, format="json", 
HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_incidents_payload - - -@pytest.mark.django_db -def test_delete_incidents( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:alert_groups-list") - incidents = AlertGroup.unarchived_objects.filter(public_primary_key=public_api_constants.DEMO_INCIDENT_ID) - total_count = incidents.count() - incident = incidents[0] - data = { - "mode": "delete", - } - response = client.delete(url + f"/{incident.public_primary_key}/", data, format="json", HTTP_AUTHORIZATION=token) - new_count = AlertGroup.unarchived_objects.filter(public_primary_key=public_api_constants.DEMO_INCIDENT_ID).count() - - assert response.status_code == status.HTTP_204_NO_CONTENT - incident.refresh_from_db() - assert total_count == new_count - assert incident is not None diff --git a/engine/apps/public_api/tests/test_demo_token/test_integrations.py b/engine/apps/public_api/tests/test_demo_token/test_integrations.py deleted file mode 100644 index be06f367..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_integrations.py +++ /dev/null @@ -1,239 +0,0 @@ -from urllib.parse import urljoin - -import pytest -from django.conf import settings -from django.urls import reverse -from rest_framework import status -from rest_framework.test import APIClient - -from apps.alerts.models import AlertReceiveChannel -from apps.public_api import constants as public_api_constants - -# https://api-docs.amixr.io/#post-integration -demo_integration_post_payload = { - "id": public_api_constants.DEMO_INTEGRATION_ID, - "team_id": None, - "name": "Grafana :blush:", - "link": urljoin(settings.BASE_URL, 
f"/integrations/v1/grafana/{public_api_constants.DEMO_INTEGRATION_LINK_TOKEN}/"), - "heartbeat": None, - "default_route": { - "escalation_chain_id": None, - "id": public_api_constants.DEMO_ROUTE_ID_2, - "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID}, - }, - "type": "grafana", - "templates": { - "grouping_key": None, - "resolve_signal": None, - "acknowledge_signal": None, - "slack": {"title": None, "message": None, "image_url": None}, - "web": {"title": None, "message": None, "image_url": None}, - "sms": { - "title": None, - }, - "phone_call": { - "title": None, - }, - "email": { - "title": None, - "message": None, - }, - "telegram": { - "title": None, - "message": None, - "image_url": None, - }, - }, - "maintenance_mode": None, - "maintenance_started_at": None, - "maintenance_end_at": None, -} - -# https://api-docs.amixr.io/#get-integration -demo_integration_payload = { - "id": public_api_constants.DEMO_INTEGRATION_ID, - "team_id": None, - "name": "Grafana :blush:", - "link": urljoin(settings.BASE_URL, f"/integrations/v1/grafana/{public_api_constants.DEMO_INTEGRATION_LINK_TOKEN}/"), - "default_route": { - "escalation_chain_id": None, - "id": public_api_constants.DEMO_ROUTE_ID_2, - "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID}, - }, - "type": "grafana", - "heartbeat": None, - "templates": { - "grouping_key": None, - "resolve_signal": None, - "acknowledge_signal": None, - "slack": {"title": None, "message": None, "image_url": None}, - "web": {"title": None, "message": None, "image_url": None}, - "sms": { - "title": None, - }, - "phone_call": { - "title": None, - }, - "email": { - "title": None, - "message": None, - }, - "telegram": { - "title": None, - "message": None, - "image_url": None, - }, - }, - "maintenance_mode": None, - "maintenance_started_at": None, - "maintenance_end_at": None, -} - -# https://api-docs.amixr.io/#list-integrations -demo_integrations_payload = { - "count": 1, - "next": None, - 
"previous": None, - "results": [ - { - "id": public_api_constants.DEMO_INTEGRATION_ID, - "team_id": None, - "name": "Grafana :blush:", - "link": urljoin( - settings.BASE_URL, f"/integrations/v1/grafana/{public_api_constants.DEMO_INTEGRATION_LINK_TOKEN}/" - ), - "default_route": { - "escalation_chain_id": None, - "id": public_api_constants.DEMO_ROUTE_ID_2, - "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID}, - }, - "type": "grafana", - "heartbeat": None, - "templates": { - "grouping_key": None, - "resolve_signal": None, - "acknowledge_signal": None, - "slack": { - "title": None, - "message": None, - "image_url": None, - }, - "web": {"title": None, "message": None, "image_url": None}, - "sms": { - "title": None, - }, - "phone_call": { - "title": None, - }, - "email": { - "title": None, - "message": None, - }, - "telegram": { - "title": None, - "message": None, - "image_url": None, - }, - }, - "maintenance_mode": None, - "maintenance_started_at": None, - "maintenance_end_at": None, - }, - ], -} - - -@pytest.mark.django_db -def test_get_integrations( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - url = reverse("api-public:integrations-list") - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_integrations_payload - - -@pytest.mark.django_db -def test_create_integration( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - data_for_create = {"type": "grafana"} - url = 
reverse("api-public:integrations-list") - response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_201_CREATED - # check on nothing change - assert response.json() == demo_integration_post_payload - - -@pytest.mark.django_db -def test_update_integration( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - integration = AlertReceiveChannel.objects.get(public_primary_key=public_api_constants.DEMO_INTEGRATION_ID) - data_for_update = {"name": "new_name"} - url = reverse("api-public:integrations-detail", args=[integration.public_primary_key]) - response = client.put(url, data=data_for_update, format="json", HTTP_AUTHORIZATION=token) - - integration.refresh_from_db() - - assert response.status_code == status.HTTP_200_OK - # check on nothing change - assert response.json() == demo_integration_payload - - -@pytest.mark.django_db -def test_invalid_integration_type( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - data_for_create = {"type": "this_is_invalid_integration_type"} - url = reverse("api-public:integrations-list") - response = client.post(url, data=data_for_create, format="json", HTTP_AUTHORIZATION=token) - assert response.status_code == status.HTTP_201_CREATED - # check on nothing change - assert response.json() == demo_integration_post_payload - - -@pytest.mark.django_db -def test_delete_integration( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = 
make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - integration = AlertReceiveChannel.objects.get(public_primary_key=public_api_constants.DEMO_INTEGRATION_ID) - - url = reverse("api-public:integrations-detail", args=[integration.public_primary_key]) - response = client.delete(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_204_NO_CONTENT - # check on nothing change - integration.refresh_from_db() - assert integration is not None diff --git a/engine/apps/public_api/tests/test_demo_token/test_on_call_shift.py b/engine/apps/public_api/tests/test_demo_token/test_on_call_shift.py deleted file mode 100644 index f4c4552d..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_on_call_shift.py +++ /dev/null @@ -1,172 +0,0 @@ -import pytest -from django.urls import reverse -from django.utils import timezone -from rest_framework import status -from rest_framework.test import APIClient - -from apps.public_api import constants as public_api_constants -from apps.schedules.models import CustomOnCallShift - -demo_on_call_shift_payload_1 = { - "id": public_api_constants.DEMO_ON_CALL_SHIFT_ID_1, - "team_id": None, - "name": public_api_constants.DEMO_ON_CALL_SHIFT_NAME_1, - "type": "single_event", - "time_zone": None, - "level": 0, - "start": public_api_constants.DEMO_ON_CALL_SHIFT_START_1, - "duration": public_api_constants.DEMO_ON_CALL_SHIFT_DURATION, - "users": [public_api_constants.DEMO_USER_ID], -} - -demo_on_call_shift_payload_2 = { - "id": public_api_constants.DEMO_ON_CALL_SHIFT_ID_2, - "team_id": None, - "name": public_api_constants.DEMO_ON_CALL_SHIFT_NAME_2, - "type": "recurrent_event", - "time_zone": None, - "level": 0, - "start": public_api_constants.DEMO_ON_CALL_SHIFT_START_2, - "duration": public_api_constants.DEMO_ON_CALL_SHIFT_DURATION, - "frequency": "weekly", - "interval": 2, - "week_start": "SU", - "users": 
[public_api_constants.DEMO_USER_ID], - "by_day": public_api_constants.DEMO_ON_CALL_SHIFT_BY_DAY, - "by_month": None, - "by_monthday": None, -} - -demo_on_call_shift_payload_list = { - "count": 2, - "next": None, - "previous": None, - "results": [demo_on_call_shift_payload_1, demo_on_call_shift_payload_2], -} - - -@pytest.mark.django_db -def test_demo_get_on_call_shift_list( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:on_call_shifts-list") - - response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_200_OK - assert response.data == demo_on_call_shift_payload_list - - -@pytest.mark.django_db -@pytest.mark.parametrize( - "demo_on_call_shift_id,payload", - [ - (public_api_constants.DEMO_ON_CALL_SHIFT_ID_1, demo_on_call_shift_payload_1), - (public_api_constants.DEMO_ON_CALL_SHIFT_ID_2, demo_on_call_shift_payload_2), - ], -) -def test_demo_get_on_call_shift_1( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, - demo_on_call_shift_id, - payload, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:on_call_shifts-detail", kwargs={"pk": demo_on_call_shift_id}) - - response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_200_OK - assert response.data == payload - - -@pytest.mark.django_db -def test_demo_post_on_call_shift( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = 
make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:on_call_shifts-list") - - data = { - "schedule_id": public_api_constants.DEMO_SCHEDULE_ID_CALENDAR, - "name": "New demo shift", - "type": CustomOnCallShift.TYPE_SINGLE_EVENT, - "start": timezone.now().replace(tzinfo=None, microsecond=0).isoformat(), - "duration": 3600, - } - - response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_201_CREATED - assert response.data == demo_on_call_shift_payload_1 - - -@pytest.mark.django_db -@pytest.mark.parametrize( - "demo_on_call_shift_id,payload", - [ - (public_api_constants.DEMO_ON_CALL_SHIFT_ID_1, demo_on_call_shift_payload_1), - (public_api_constants.DEMO_ON_CALL_SHIFT_ID_2, demo_on_call_shift_payload_2), - ], -) -def test_demo_update_on_call_shift( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, - demo_on_call_shift_id, - payload, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - data = {"name": "Updated demo name"} - - url = reverse("api-public:on_call_shifts-detail", kwargs={"pk": demo_on_call_shift_id}) - - response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_200_OK - assert response.data == payload - - -@pytest.mark.django_db -@pytest.mark.parametrize( - "demo_on_call_shift_id", - [ - public_api_constants.DEMO_ON_CALL_SHIFT_ID_1, - public_api_constants.DEMO_ON_CALL_SHIFT_ID_2, - ], -) -def test_demo_delete_on_call_shift( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, - demo_on_call_shift_id, -): - - organization, user, token = 
make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:on_call_shifts-detail", kwargs={"pk": demo_on_call_shift_id}) - - response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_204_NO_CONTENT - assert CustomOnCallShift.objects.filter(public_primary_key=demo_on_call_shift_id).exists() diff --git a/engine/apps/public_api/tests/test_demo_token/test_personal_notification_rules.py b/engine/apps/public_api/tests/test_demo_token/test_personal_notification_rules.py deleted file mode 100644 index d0abf315..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_personal_notification_rules.py +++ /dev/null @@ -1,225 +0,0 @@ -import pytest -from django.urls import reverse -from django.utils import timezone -from rest_framework import status -from rest_framework.test import APIClient - -from apps.base.models import UserNotificationPolicy -from apps.base.models.user_notification_policy import NotificationChannelPublicAPIOptions -from apps.public_api import constants as public_api_constants - -TYPE_WAIT = "wait" - -demo_personal_notification_rule_payload_1 = { - "id": public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1, - "user_id": public_api_constants.DEMO_USER_ID, - "position": 0, - "important": False, - "type": "notify_by_sms", -} - -demo_personal_notification_rule_payload_2 = { - "id": public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_2, - "user_id": public_api_constants.DEMO_USER_ID, - "position": 1, - "duration": timezone.timedelta(seconds=300).seconds, - "important": False, - "type": "wait", -} - -demo_personal_notification_rule_payload_3 = { - "id": public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_3, - "user_id": public_api_constants.DEMO_USER_ID, - "position": 2, - "important": False, - "type": "notify_by_phone_call", -} - -demo_personal_notification_rule_payload_4 = { - "id": 
public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_4, - "user_id": public_api_constants.DEMO_USER_ID, - "position": 0, - "important": True, - "type": "notify_by_phone_call", -} - -demo_personal_notification_rules_payload = { - "count": 4, - "next": None, - "previous": None, - "results": [ - demo_personal_notification_rule_payload_1, - demo_personal_notification_rule_payload_2, - demo_personal_notification_rule_payload_3, - demo_personal_notification_rule_payload_4, - ], -} - -demo_personal_notification_rules_non_important_payload = { - "count": 3, - "next": None, - "previous": None, - "results": [ - demo_personal_notification_rule_payload_1, - demo_personal_notification_rule_payload_2, - demo_personal_notification_rule_payload_3, - ], -} - -demo_personal_notification_rules_important_payload = { - "count": 1, - "next": None, - "previous": None, - "results": [ - demo_personal_notification_rule_payload_4, - ], -} - - -@pytest.mark.django_db -def test_get_personal_notification_rule( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - _ = make_data_for_demo_token(organization, user) - - demo_personal_notification_rule_1 = UserNotificationPolicy.objects.get( - public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1 - ) - client = APIClient() - - url = reverse( - "api-public:personal_notification_rules-detail", - kwargs={"pk": demo_personal_notification_rule_1.public_primary_key}, - ) - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_personal_notification_rule_payload_1 - - -@pytest.mark.django_db -def test_get_personal_notification_rules_list( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = 
make_organization_and_user_with_slack_identities_for_demo_token() - _ = make_data_for_demo_token(organization, user) - - client = APIClient() - - url = reverse("api-public:personal_notification_rules-list") - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_personal_notification_rules_payload - - -@pytest.mark.django_db -def test_get_personal_notification_rules_list_important( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - _ = make_data_for_demo_token(organization, user) - client = APIClient() - - url = reverse("api-public:personal_notification_rules-list") - response = client.get(url + "?important=true", format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_personal_notification_rules_important_payload - - -@pytest.mark.django_db -def test_get_personal_notification_rules_list_non_important( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - _ = make_data_for_demo_token(organization, user) - - client = APIClient() - - url = reverse("api-public:personal_notification_rules-list") - response = client.get(url + "?important=false", format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_personal_notification_rules_non_important_payload - - -@pytest.mark.django_db -def test_update_personal_notification_rule( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - _ = 
make_data_for_demo_token(organization, user) - demo_personal_notification_rule_1 = UserNotificationPolicy.objects.get( - public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1 - ) - client = APIClient() - - url = reverse( - "api-public:personal_notification_rules-detail", - kwargs={"pk": demo_personal_notification_rule_1.public_primary_key}, - ) - - data_to_update = { - "type": NotificationChannelPublicAPIOptions.LABELS[UserNotificationPolicy.NotificationChannel.SLACK] - } - response = client.put(url, format="json", HTTP_AUTHORIZATION=token, data=data_to_update) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_personal_notification_rule_payload_1 - # check on nothing change - demo_personal_notification_rule_1.refresh_from_db() - assert demo_personal_notification_rule_1.notify_by != UserNotificationPolicy.NotificationChannel.SLACK - - -@pytest.mark.django_db -def test_create_personal_notification_rule( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - _ = make_data_for_demo_token(organization, user) - client = APIClient() - - url = reverse("api-public:personal_notification_rules-list") - data_for_create = { - "user_id": user.public_primary_key, - "type": TYPE_WAIT, - "position": 1, - "duration": timezone.timedelta(seconds=300).seconds, - } - response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create) - - assert response.status_code == status.HTTP_201_CREATED - assert response.json() == demo_personal_notification_rule_payload_1 - - -@pytest.mark.django_db -def test_delete_personal_notification_rule( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - _ = make_data_for_demo_token(organization, 
user) - demo_personal_notification_rule_1 = UserNotificationPolicy.objects.get( - public_primary_key=public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1 - ) - client = APIClient() - - url = reverse( - "api-public:personal_notification_rules-detail", - kwargs={"pk": demo_personal_notification_rule_1.public_primary_key}, - ) - - response = client.delete(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_204_NO_CONTENT - # check on nothing change - demo_personal_notification_rule_1.refresh_from_db() - assert demo_personal_notification_rule_1 is not None diff --git a/engine/apps/public_api/tests/test_demo_token/test_resolution_notes.py b/engine/apps/public_api/tests/test_demo_token/test_resolution_notes.py deleted file mode 100644 index 888760e9..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_resolution_notes.py +++ /dev/null @@ -1,117 +0,0 @@ -import pytest -from django.urls import reverse -from rest_framework import status -from rest_framework.test import APIClient - -from apps.alerts.models import ResolutionNote -from apps.public_api import constants as public_api_constants - -demo_resolution_note_payload = { - "id": public_api_constants.DEMO_RESOLUTION_NOTE_ID, - "alert_group_id": public_api_constants.DEMO_INCIDENT_ID, - "author": public_api_constants.DEMO_USER_ID, - "source": public_api_constants.DEMO_RESOLUTION_NOTE_SOURCE, - "created_at": public_api_constants.DEMO_RESOLUTION_NOTE_CREATED_AT, - "text": public_api_constants.DEMO_RESOLUTION_NOTE_TEXT, -} - -demo_resolution_note_payload_list = { - "count": 1, - "next": None, - "previous": None, - "results": [demo_resolution_note_payload], -} - - -@pytest.mark.django_db -def test_demo_get_resolution_note_list( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = 
make_data_for_demo_token(organization, user) - - url = reverse("api-public:resolution_notes-list") - - response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_200_OK - assert response.data == demo_resolution_note_payload_list - - -@pytest.mark.django_db -def test_demo_get_resolution_note( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:resolution_notes-detail", kwargs={"pk": public_api_constants.DEMO_RESOLUTION_NOTE_ID}) - - response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_200_OK - assert response.data == demo_resolution_note_payload - - -@pytest.mark.django_db -def test_demo_post_resolution_note( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:resolution_notes-list") - - data = {"alert_group_id": public_api_constants.DEMO_INCIDENT_ID, "text": "New demo text"} - - response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_201_CREATED - assert response.data == demo_resolution_note_payload - - -@pytest.mark.django_db -def test_demo_update_resolution_note( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - data = {"alert_group_id": 
public_api_constants.DEMO_INCIDENT_ID, "text": "Updated demo text"} - - url = reverse("api-public:resolution_notes-detail", kwargs={"pk": public_api_constants.DEMO_RESOLUTION_NOTE_ID}) - - response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_200_OK - assert response.data == demo_resolution_note_payload - - -@pytest.mark.django_db -def test_demo_delete_resolution_note( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:resolution_notes-detail", kwargs={"pk": public_api_constants.DEMO_RESOLUTION_NOTE_ID}) - - response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_204_NO_CONTENT - assert ResolutionNote.objects.filter(public_primary_key=public_api_constants.DEMO_RESOLUTION_NOTE_ID).exists() diff --git a/engine/apps/public_api/tests/test_demo_token/test_routes.py b/engine/apps/public_api/tests/test_demo_token/test_routes.py deleted file mode 100644 index cd8938db..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_routes.py +++ /dev/null @@ -1,182 +0,0 @@ -import pytest -from django.urls import reverse -from rest_framework import status -from rest_framework.test import APIClient - -from apps.alerts.models import ChannelFilter -from apps.public_api import constants as public_api_constants - -# https://api-docs.amixr.io/#get-route -demo_route_payload = { - "id": public_api_constants.DEMO_ROUTE_ID_1, - "escalation_chain_id": None, - "integration_id": public_api_constants.DEMO_INTEGRATION_ID, - "routing_regex": "us-(east|west)", - "position": 0, - "is_the_last_route": False, - "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID}, -} - -# 
https://api-docs.amixr.io/#list-routes -demo_routes_payload = { - "count": 2, - "next": None, - "previous": None, - "results": [ - { - "id": public_api_constants.DEMO_ROUTE_ID_1, - "escalation_chain_id": None, - "integration_id": public_api_constants.DEMO_INTEGRATION_ID, - "routing_regex": "us-(east|west)", - "position": 0, - "is_the_last_route": False, - "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID}, - }, - { - "id": public_api_constants.DEMO_ROUTE_ID_2, - "escalation_chain_id": None, - "integration_id": public_api_constants.DEMO_INTEGRATION_ID, - "routing_regex": ".*", - "position": 1, - "is_the_last_route": True, - "slack": {"channel_id": public_api_constants.DEMO_SLACK_CHANNEL_FOR_ROUTE_ID}, - }, - ], -} - - -@pytest.mark.django_db -def test_get_route( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - channel_filter = ChannelFilter.objects.get(public_primary_key=public_api_constants.DEMO_ROUTE_ID_1) - - url = reverse("api-public:routes-detail", kwargs={"pk": channel_filter.public_primary_key}) - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_route_payload - - -@pytest.mark.django_db -def test_get_routes_list( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:routes-list") - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_routes_payload - - 
-@pytest.mark.django_db -def test_get_routes_filter_by_integration_id( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:routes-list") - response = client.get( - url + f"?integration_id={public_api_constants.DEMO_INTEGRATION_ID}", format="json", HTTP_AUTHORIZATION=token - ) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_routes_payload - - -@pytest.mark.django_db -def test_create_route( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:routes-list") - data_for_create = { - "integration_id": public_api_constants.DEMO_INTEGRATION_ID, - "routing_regex": "testreg", - } - response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create) - - assert response.status_code == status.HTTP_201_CREATED - assert response.json() == demo_route_payload - - -@pytest.mark.django_db -def test_invalid_route_data( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:routes-list") - data_for_create = { - "integration_id": public_api_constants.DEMO_INTEGRATION_ID, - "routing_regex": None, # routing_regex cannot be null for non-default filters - } - response = client.post(url, format="json", HTTP_AUTHORIZATION=token, data=data_for_create) - - assert response.status_code == status.HTTP_201_CREATED 
- assert response.json() == demo_route_payload - - -@pytest.mark.django_db -def test_update_route( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - channel_filter = ChannelFilter.objects.get(public_primary_key=public_api_constants.DEMO_ROUTE_ID_1) - - url = reverse("api-public:routes-detail", kwargs={"pk": channel_filter.public_primary_key}) - data_to_update = { - "routing_regex": "testreg_updated", - } - - assert channel_filter.filtering_term != data_to_update["routing_regex"] - - response = client.put(url, format="json", HTTP_AUTHORIZATION=token, data=data_to_update) - - assert response.status_code == status.HTTP_200_OK - # check on nothing change - channel_filter.refresh_from_db() - assert response.json() == demo_route_payload - assert channel_filter.filtering_term != data_to_update["routing_regex"] - - -@pytest.mark.django_db -def test_delete_route( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - channel_filter = ChannelFilter.objects.get(public_primary_key=public_api_constants.DEMO_ROUTE_ID_1) - - url = reverse("api-public:routes-detail", kwargs={"pk": channel_filter.public_primary_key}) - response = client.delete(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_204_NO_CONTENT - # check on nothing change - channel_filter.refresh_from_db() - assert channel_filter is not None diff --git a/engine/apps/public_api/tests/test_demo_token/test_schedules.py b/engine/apps/public_api/tests/test_demo_token/test_schedules.py deleted file mode 100644 index 9a56955b..00000000 --- 
a/engine/apps/public_api/tests/test_demo_token/test_schedules.py +++ /dev/null @@ -1,164 +0,0 @@ -import pytest -from django.urls import reverse -from rest_framework import status -from rest_framework.test import APIClient - -from apps.public_api import constants as public_api_constants -from apps.schedules.models import OnCallSchedule - -demo_ical_schedule_payload = { - "id": public_api_constants.DEMO_SCHEDULE_ID_ICAL, - "team_id": None, - "name": public_api_constants.DEMO_SCHEDULE_NAME_ICAL, - "type": "ical", - "ical_url_primary": public_api_constants.DEMO_SCHEDULE_ICAL_URL_PRIMARY, - "ical_url_overrides": public_api_constants.DEMO_SCHEDULE_ICAL_URL_OVERRIDES, - "on_call_now": [public_api_constants.DEMO_USER_ID], - "slack": { - "channel_id": public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID, - "user_group_id": public_api_constants.DEMO_SLACK_USER_GROUP_SLACK_ID, - }, -} - -demo_calendar_schedule_payload = { - "id": public_api_constants.DEMO_SCHEDULE_ID_CALENDAR, - "team_id": None, - "name": public_api_constants.DEMO_SCHEDULE_NAME_CALENDAR, - "type": "calendar", - "time_zone": "America/New_york", - "on_call_now": [public_api_constants.DEMO_USER_ID], - "shifts": [ - public_api_constants.DEMO_ON_CALL_SHIFT_ID_1, - public_api_constants.DEMO_ON_CALL_SHIFT_ID_2, - ], - "slack": { - "channel_id": public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID, - "user_group_id": public_api_constants.DEMO_SLACK_USER_GROUP_SLACK_ID, - }, - "ical_url_overrides": None, -} - -demo_schedules_payload = { - "count": 2, - "next": None, - "previous": None, - "results": [ - demo_ical_schedule_payload, - demo_calendar_schedule_payload, - ], -} - - -@pytest.mark.django_db -def test_get_schedule( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - schedule = 
OnCallSchedule.objects.get(public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_ICAL) - - url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key}) - - response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_200_OK - assert response.data == demo_ical_schedule_payload - - -@pytest.mark.django_db -def test_create_schedule( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:schedules-list") - - data = { - "name": "schedule test name", - "type": "ical", - } - - response = client.post(url, data=data, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_201_CREATED - # check that demo instance was returned - assert response.data == demo_ical_schedule_payload - - -@pytest.mark.django_db -def test_update_ical_schedule( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - schedule = OnCallSchedule.objects.get(public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_ICAL) - - url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key}) - - data = { - "name": "NEW NAME", - } - - response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - # check on nothing change - schedule.refresh_from_db() - assert schedule.name != data["name"] - assert response.data == demo_ical_schedule_payload - - -@pytest.mark.django_db -def test_update_calendar_schedule( - 
make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - schedule = OnCallSchedule.objects.get(public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_CALENDAR) - - url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key}) - - data = { - "name": "NEW NAME", - } - - response = client.put(url, data=data, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - # check on nothing change - schedule.refresh_from_db() - assert schedule.name != data["name"] - assert response.data == demo_calendar_schedule_payload - - -@pytest.mark.django_db -def test_delete_schedule( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - schedule = OnCallSchedule.objects.get(public_primary_key=public_api_constants.DEMO_SCHEDULE_ID_ICAL) - - url = reverse("api-public:schedules-detail", kwargs={"pk": schedule.public_primary_key}) - - response = client.delete(url, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_204_NO_CONTENT - # check on nothing change - schedule.refresh_from_db() - assert schedule is not None diff --git a/engine/apps/public_api/tests/test_demo_token/test_slack_channels.py b/engine/apps/public_api/tests/test_demo_token/test_slack_channels.py deleted file mode 100644 index 80a11bdc..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_slack_channels.py +++ /dev/null @@ -1,34 +0,0 @@ -import pytest -from django.urls import reverse -from rest_framework import status -from rest_framework.test import APIClient - -from 
apps.public_api import constants as public_api_constants - -demo_slack_channels_payload = { - "count": 1, - "next": None, - "previous": None, - "results": [ - { - "name": public_api_constants.DEMO_SLACK_CHANNEL_NAME, - "slack_id": public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID, - } - ], -} - - -@pytest.mark.django_db -def test_get_slack_channels_list( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:slack_channels-list") - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_slack_channels_payload diff --git a/engine/apps/public_api/tests/test_demo_token/test_user_groups.py b/engine/apps/public_api/tests/test_demo_token/test_user_groups.py deleted file mode 100644 index 08ee995c..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_user_groups.py +++ /dev/null @@ -1,36 +0,0 @@ -import pytest -from django.urls import reverse -from rest_framework import status -from rest_framework.test import APIClient - -from apps.public_api import constants as public_api_constants - -demo_user_group_payload = { - "id": public_api_constants.DEMO_SLACK_USER_GROUP_ID, - "type": "slack_based", - "slack": { - "id": public_api_constants.DEMO_SLACK_USER_GROUP_SLACK_ID, - "name": public_api_constants.DEMO_SLACK_USER_GROUP_NAME, - "handle": public_api_constants.DEMO_SLACK_USER_GROUP_HANDLE, - }, -} - -demo_user_group_payload_list = {"count": 1, "next": None, "previous": None, "results": [demo_user_group_payload]} - - -@pytest.mark.django_db -def test_demo_get_user_groups_list( - make_organization_and_user_with_slack_identities_for_demo_token, - make_data_for_demo_token, -): - - organization, user, token = 
make_organization_and_user_with_slack_identities_for_demo_token() - client = APIClient() - _ = make_data_for_demo_token(organization, user) - - url = reverse("api-public:user_groups-list") - - response = client.get(url, format="json", HTTP_AUTHORIZATION=f"{token}") - - assert response.status_code == status.HTTP_200_OK - assert response.data == demo_user_group_payload_list diff --git a/engine/apps/public_api/tests/test_demo_token/test_users.py b/engine/apps/public_api/tests/test_demo_token/test_users.py deleted file mode 100644 index ffa4bfdb..00000000 --- a/engine/apps/public_api/tests/test_demo_token/test_users.py +++ /dev/null @@ -1,91 +0,0 @@ -import pytest -from django.urls import reverse -from rest_framework import status -from rest_framework.test import APIClient - -from apps.public_api import constants as public_api_constants - -# NB can compare with https://api-docs.amixr.io/#get-user - -demo_token_user_payload = { - "id": public_api_constants.DEMO_USER_ID, - "email": public_api_constants.DEMO_USER_EMAIL, - "slack": {"user_id": public_api_constants.DEMO_SLACK_USER_ID, "team_id": public_api_constants.DEMO_SLACK_TEAM_ID}, - "username": public_api_constants.DEMO_USER_USERNAME, - "role": "admin", - "is_phone_number_verified": False, -} - -# https://api-docs.amixr.io/#list-users -demo_token_users_payload = { - "count": 1, - "next": None, - "previous": None, - "results": [ - { - "id": public_api_constants.DEMO_USER_ID, - "email": public_api_constants.DEMO_USER_EMAIL, - "slack": { - "user_id": public_api_constants.DEMO_SLACK_USER_ID, - "team_id": public_api_constants.DEMO_SLACK_TEAM_ID, - }, - "username": public_api_constants.DEMO_USER_USERNAME, - "role": "admin", - "is_phone_number_verified": False, - } - ], -} - - -@pytest.mark.django_db -def test_get_user( - make_organization_and_user_with_slack_identities_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - - url = 
reverse("api-public:users-detail", args=[user.public_primary_key]) - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_token_user_payload - - # get current user - url = reverse("api-public:users-detail", args=["current"]) - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_token_user_payload - - -@pytest.mark.django_db -def test_get_users( - make_organization_and_user_with_slack_identities_for_demo_token, -): - organization, user, token = make_organization_and_user_with_slack_identities_for_demo_token() - - client = APIClient() - - url = reverse("api-public:users-list") - response = client.get(url, format="json", HTTP_AUTHORIZATION=token) - - assert response.status_code == status.HTTP_200_OK - assert response.json() == demo_token_users_payload - - -@pytest.mark.django_db -def test_forbidden_access( - make_organization_and_user_with_slack_identities_for_demo_token, - make_organization_and_user_with_token, -): - _, user, _ = make_organization_and_user_with_slack_identities_for_demo_token() - _, _, another_org_token = make_organization_and_user_with_token() - - client = APIClient() - - url = reverse("api-public:users-detail", args=[user.public_primary_key]) - - response = client.get(url, format="json", HTTP_AUTHORIZATION=another_org_token) - - assert response.status_code == status.HTTP_404_NOT_FOUND diff --git a/engine/apps/public_api/throttlers/__init__.py b/engine/apps/public_api/throttlers/__init__.py index e69de29b..20dc00d7 100644 --- a/engine/apps/public_api/throttlers/__init__.py +++ b/engine/apps/public_api/throttlers/__init__.py @@ -0,0 +1,3 @@ +from .info_throttler import InfoThrottler # noqa: F401 +from .phone_notification_throttler import PhoneNotificationThrottler # noqa: F401 +from .user_throttle import UserThrottle # noqa: F401 diff --git 
a/engine/apps/public_api/throttlers/info_throttler.py b/engine/apps/public_api/throttlers/info_throttler.py new file mode 100644 index 00000000..a48bce22 --- /dev/null +++ b/engine/apps/public_api/throttlers/info_throttler.py @@ -0,0 +1,6 @@ +from rest_framework.throttling import UserRateThrottle + + +class InfoThrottler(UserRateThrottle): + scope = "info" + rate = "100/m" diff --git a/engine/apps/public_api/throttlers/phone_notification_throttler.py b/engine/apps/public_api/throttlers/phone_notification_throttler.py new file mode 100644 index 00000000..a66e19a1 --- /dev/null +++ b/engine/apps/public_api/throttlers/phone_notification_throttler.py @@ -0,0 +1,6 @@ +from rest_framework.throttling import UserRateThrottle + + +class PhoneNotificationThrottler(UserRateThrottle): + scope = "phone_notification" + rate = "60/m" diff --git a/engine/apps/public_api/urls.py b/engine/apps/public_api/urls.py index 95fa447a..a91898df 100644 --- a/engine/apps/public_api/urls.py +++ b/engine/apps/public_api/urls.py @@ -30,4 +30,6 @@ router.register(r"teams", views.TeamView, basename="teams") urlpatterns = [ path("", include(router.urls)), optional_slash_path("info", views.InfoView.as_view(), name="info"), + optional_slash_path("make_call", views.MakeCallView.as_view(), name="make_call"), + optional_slash_path("send_sms", views.SendSMSView.as_view(), name="send_sms"), ] diff --git a/engine/apps/public_api/views/__init__.py b/engine/apps/public_api/views/__init__.py index 1892d123..4ffcec04 100644 --- a/engine/apps/public_api/views/__init__.py +++ b/engine/apps/public_api/views/__init__.py @@ -8,6 +8,7 @@ from .integrations import IntegrationView # noqa: F401 from .on_call_shifts import CustomOnCallShiftView # noqa: F401 from .organizations import OrganizationView # noqa: F401 from .personal_notifications import PersonalNotificationView # noqa: F401 +from .phone_notifications import MakeCallView, SendSMSView # noqa: F401 from .resolution_notes import ResolutionNoteView # noqa: F401 
from .routes import ChannelFilterView # noqa: F401 from .schedules import OnCallScheduleChannelView # noqa: F401 diff --git a/engine/apps/public_api/views/action.py b/engine/apps/public_api/views/action.py index bbb6bc73..60ca1465 100644 --- a/engine/apps/public_api/views/action.py +++ b/engine/apps/public_api/views/action.py @@ -8,11 +8,11 @@ from apps.auth_token.auth import ApiTokenAuthentication from apps.public_api.serializers.action import ActionSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from common.api_helpers.filters import ByTeamFilter -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin +from common.api_helpers.mixins import RateLimitHeadersMixin from common.api_helpers.paginators import FiftyPageSizePaginator -class ActionView(RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, GenericViewSet): +class ActionView(RateLimitHeadersMixin, mixins.ListModelMixin, GenericViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) pagination_class = FiftyPageSizePaginator diff --git a/engine/apps/public_api/views/alerts.py b/engine/apps/public_api/views/alerts.py index 56fe651e..da332176 100644 --- a/engine/apps/public_api/views/alerts.py +++ b/engine/apps/public_api/views/alerts.py @@ -6,14 +6,13 @@ from rest_framework.viewsets import GenericViewSet from apps.alerts.models import Alert from apps.auth_token.auth import ApiTokenAuthentication -from apps.public_api import constants as public_api_constants from apps.public_api.serializers.alerts import AlertSerializer from apps.public_api.throttlers.user_throttle import UserThrottle -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin +from common.api_helpers.mixins import RateLimitHeadersMixin from common.api_helpers.paginators import FiftyPageSizePaginator -class AlertView(RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, GenericViewSet): +class 
AlertView(RateLimitHeadersMixin, mixins.ListModelMixin, GenericViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) @@ -23,8 +22,6 @@ class AlertView(RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, Ge serializer_class = AlertSerializer pagination_class = FiftyPageSizePaginator - demo_default_id = public_api_constants.DEMO_ALERT_IDS[0] - def get_queryset(self): alert_group_id = self.request.query_params.get("alert_group_id", None) search = self.request.query_params.get("search", None) diff --git a/engine/apps/public_api/views/escalation_policies.py b/engine/apps/public_api/views/escalation_policies.py index fc285588..15203f63 100644 --- a/engine/apps/public_api/views/escalation_policies.py +++ b/engine/apps/public_api/views/escalation_policies.py @@ -5,15 +5,14 @@ from rest_framework.viewsets import ModelViewSet from apps.alerts.models import EscalationPolicy from apps.auth_token.auth import ApiTokenAuthentication -from apps.public_api import constants as public_api_constants from apps.public_api.serializers import EscalationPolicySerializer, EscalationPolicyUpdateSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin +from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin from common.api_helpers.paginators import FiftyPageSizePaginator -class EscalationPolicyView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet): +class EscalationPolicyView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) @@ -25,8 +24,6 @@ class EscalationPolicyView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializ pagination_class = FiftyPageSizePaginator - 
demo_default_id = public_api_constants.DEMO_ESCALATION_POLICY_ID_1 - def get_queryset(self): escalation_chain_id = self.request.query_params.get("escalation_chain_id", None) queryset = EscalationPolicy.objects.filter( diff --git a/engine/apps/public_api/views/incidents.py b/engine/apps/public_api/views/incidents.py index 1bfe830e..cd4d6098 100644 --- a/engine/apps/public_api/views/incidents.py +++ b/engine/apps/public_api/views/incidents.py @@ -8,14 +8,13 @@ from rest_framework.viewsets import GenericViewSet from apps.alerts.models import AlertGroup from apps.alerts.tasks import delete_alert_group, wipe from apps.auth_token.auth import ApiTokenAuthentication -from apps.public_api import constants as public_api_constants from apps.public_api.constants import VALID_DATE_FOR_DELETE_INCIDENT from apps.public_api.helpers import is_valid_group_creation_date, team_has_slack_token_for_deleting from apps.public_api.serializers import IncidentSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from common.api_helpers.exceptions import BadRequest from common.api_helpers.filters import ByTeamModelFieldFilterMixin, get_team_queryset -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin +from common.api_helpers.mixins import RateLimitHeadersMixin from common.api_helpers.paginators import FiftyPageSizePaginator @@ -30,9 +29,7 @@ class IncidentByTeamFilter(ByTeamModelFieldFilterMixin, filters.FilterSet): ) -class IncidentView( - RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, mixins.DestroyModelMixin, GenericViewSet -): +class IncidentView(RateLimitHeadersMixin, mixins.ListModelMixin, mixins.DestroyModelMixin, GenericViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) @@ -42,8 +39,6 @@ class IncidentView( serializer_class = IncidentSerializer pagination_class = FiftyPageSizePaginator - demo_default_id = public_api_constants.DEMO_INCIDENT_ID - filter_backends = 
(filters.DjangoFilterBackend,) filterset_class = IncidentByTeamFilter diff --git a/engine/apps/public_api/views/info.py b/engine/apps/public_api/views/info.py index f9649181..f9cc13ca 100644 --- a/engine/apps/public_api/views/info.py +++ b/engine/apps/public_api/views/info.py @@ -3,14 +3,14 @@ from rest_framework.response import Response from rest_framework.views import APIView from apps.auth_token.auth import ApiTokenAuthentication -from apps.public_api.throttlers.user_throttle import UserThrottle +from apps.public_api.throttlers import InfoThrottler class InfoView(APIView): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) - throttle_classes = [UserThrottle] + throttle_classes = [InfoThrottler] def get(self, request): response = {"url": self.request.auth.organization.grafana_url} diff --git a/engine/apps/public_api/views/integrations.py b/engine/apps/public_api/views/integrations.py index 8aa4784e..447c5b2b 100644 --- a/engine/apps/public_api/views/integrations.py +++ b/engine/apps/public_api/views/integrations.py @@ -6,17 +6,11 @@ from rest_framework.viewsets import ModelViewSet from apps.alerts.models import AlertReceiveChannel from apps.auth_token.auth import ApiTokenAuthentication -from apps.public_api import constants as public_api_constants from apps.public_api.serializers import IntegrationSerializer, IntegrationUpdateSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log from common.api_helpers.filters import ByTeamFilter -from common.api_helpers.mixins import ( - DemoTokenMixin, - FilterSerializerMixin, - RateLimitHeadersMixin, - UpdateSerializerMixin, -) +from common.api_helpers.mixins import FilterSerializerMixin, RateLimitHeadersMixin, UpdateSerializerMixin from common.api_helpers.paginators import FiftyPageSizePaginator from .maintaiable_object_mixin import MaintainableObjectMixin @@ 
-24,7 +18,6 @@ from .maintaiable_object_mixin import MaintainableObjectMixin class IntegrationView( RateLimitHeadersMixin, - DemoTokenMixin, FilterSerializerMixin, UpdateSerializerMixin, MaintainableObjectMixin, @@ -41,8 +34,6 @@ class IntegrationView( pagination_class = FiftyPageSizePaginator - demo_default_id = public_api_constants.DEMO_INTEGRATION_ID - filter_backends = (filters.DjangoFilterBackend,) filterset_class = ByTeamFilter @@ -50,6 +41,10 @@ class IntegrationView( queryset = AlertReceiveChannel.objects.filter(organization=self.request.auth.organization).order_by( "created_at" ) + name = self.request.query_params.get("name", None) + if name is not None: + queryset = queryset.filter(verbal_name=name) + queryset = self.filter_queryset(queryset) queryset = self.serializer_class.setup_eager_loading(queryset) queryset = queryset.annotate(alert_groups_count_annotated=Count("alert_groups", distinct=True)) return queryset diff --git a/engine/apps/public_api/views/on_call_shifts.py b/engine/apps/public_api/views/on_call_shifts.py index 5f366f19..1d0df97a 100644 --- a/engine/apps/public_api/views/on_call_shifts.py +++ b/engine/apps/public_api/views/on_call_shifts.py @@ -4,17 +4,16 @@ from rest_framework.permissions import IsAuthenticated from rest_framework.viewsets import ModelViewSet from apps.auth_token.auth import ApiTokenAuthentication -from apps.public_api import constants as public_api_constants from apps.public_api.serializers import CustomOnCallShiftSerializer, CustomOnCallShiftUpdateSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from apps.schedules.models import CustomOnCallShift from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log from common.api_helpers.filters import ByTeamFilter -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin +from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin from 
common.api_helpers.paginators import FiftyPageSizePaginator -class CustomOnCallShiftView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet): +class CustomOnCallShiftView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) @@ -29,8 +28,6 @@ class CustomOnCallShiftView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSeriali filter_backends = [DjangoFilterBackend] filterset_class = ByTeamFilter - demo_default_id = public_api_constants.DEMO_ON_CALL_SHIFT_ID_1 - def get_queryset(self): name = self.request.query_params.get("name", None) schedule_id = self.request.query_params.get("schedule_id", None) diff --git a/engine/apps/public_api/views/organizations.py b/engine/apps/public_api/views/organizations.py index d3bce01e..f4fd1352 100644 --- a/engine/apps/public_api/views/organizations.py +++ b/engine/apps/public_api/views/organizations.py @@ -3,17 +3,15 @@ from rest_framework.settings import api_settings from rest_framework.viewsets import ReadOnlyModelViewSet from apps.auth_token.auth import ApiTokenAuthentication -from apps.public_api import constants as public_api_constants from apps.public_api.serializers import OrganizationSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from apps.user_management.models import Organization -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin +from common.api_helpers.mixins import RateLimitHeadersMixin from common.api_helpers.paginators import TwentyFivePageSizePaginator class OrganizationView( RateLimitHeadersMixin, - DemoTokenMixin, ReadOnlyModelViewSet, ): authentication_classes = (ApiTokenAuthentication,) @@ -26,8 +24,6 @@ class OrganizationView( pagination_class = TwentyFivePageSizePaginator - demo_default_id = public_api_constants.DEMO_ORGANIZATION_ID - def get_queryset(self): # It's a dirty hack to get queryset from the object. 
Just in case we'll return multiple teams in the future. return Organization.objects.filter(pk=self.request.auth.organization.pk) diff --git a/engine/apps/public_api/views/personal_notifications.py b/engine/apps/public_api/views/personal_notifications.py index 0b3e0b0a..3119bea9 100644 --- a/engine/apps/public_api/views/personal_notifications.py +++ b/engine/apps/public_api/views/personal_notifications.py @@ -6,17 +6,16 @@ from rest_framework.viewsets import ModelViewSet from apps.auth_token.auth import ApiTokenAuthentication from apps.base.models import UserNotificationPolicy -from apps.public_api import constants as public_api_constants from apps.public_api.serializers import PersonalNotificationRuleSerializer, PersonalNotificationRuleUpdateSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from apps.user_management.models import User from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log from common.api_helpers.exceptions import BadRequest -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin +from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin from common.api_helpers.paginators import FiftyPageSizePaginator -class PersonalNotificationView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet): +class PersonalNotificationView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) @@ -28,8 +27,6 @@ class PersonalNotificationView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSeri pagination_class = FiftyPageSizePaginator - demo_default_id = public_api_constants.DEMO_PERSONAL_NOTIFICATION_ID_1 - def get_queryset(self): user_id = self.request.query_params.get("user_id", None) important = self.request.query_params.get("important", None) diff --git a/engine/apps/public_api/views/phone_notifications.py 
b/engine/apps/public_api/views/phone_notifications.py new file mode 100644 index 00000000..f9f96f74 --- /dev/null +++ b/engine/apps/public_api/views/phone_notifications.py @@ -0,0 +1,86 @@ +import logging + +from rest_framework import serializers, status +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response +from rest_framework.views import APIView +from twilio.base.exceptions import TwilioRestException + +from apps.auth_token.auth import ApiTokenAuthentication +from apps.public_api.throttlers.phone_notification_throttler import PhoneNotificationThrottler +from apps.twilioapp.models import PhoneCall, SMSMessage + +logger = logging.getLogger(__name__) + + +class PhoneNotificationDataSerializer(serializers.Serializer): + email = serializers.EmailField() + message = serializers.CharField(max_length=1024) + + +class MakeCallView(APIView): + authentication_classes = (ApiTokenAuthentication,) + permission_classes = (IsAuthenticated,) + + throttle_classes = [ + PhoneNotificationThrottler, + ] + + def post(self, request): + serializer = PhoneNotificationDataSerializer(data=request.data) + serializer.is_valid(raise_exception=True) + + response_data = {} + organization = self.request.auth.organization + logger.info(f"Making cloud call. Email {serializer.validated_data['email']}") + user = organization.users.filter( + email=serializer.validated_data["email"], _verified_phone_number__isnull=False + ).first() + if user is None: + response_data = {"error": "user-not-found"} + return Response(status=status.HTTP_404_NOT_FOUND, data=response_data) + + try: + PhoneCall.make_grafana_cloud_call(user, serializer.validated_data["message"]) + except TwilioRestException as e: + logger.info(f"Making cloud call. Twilio exception {str(e)}") + return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE, data=response_data) + except PhoneCall.PhoneCallsLimitExceeded: + logger.info(f"Making cloud call. 
PhoneCallsLimitExceeded") + return Response(status=status.HTTP_400_BAD_REQUEST, data={"error": "limit-exceeded"}) + + return Response(status=status.HTTP_200_OK, data=response_data) + + +class SendSMSView(APIView): + authentication_classes = (ApiTokenAuthentication,) + permission_classes = (IsAuthenticated,) + + throttle_classes = [ + PhoneNotificationThrottler, + ] + + def post(self, request): + serializer = PhoneNotificationDataSerializer(data=request.data) + serializer.is_valid(raise_exception=True) + + response_data = {} + organization = self.request.auth.organization + logger.info(f"Sending cloud sms. Email {serializer.validated_data['email']}") + user = organization.users.filter( + email=serializer.validated_data["email"], _verified_phone_number__isnull=False + ).first() + if user is None: + response_data = {"error": "user-not-found"} + return Response(status=status.HTTP_404_NOT_FOUND, data=response_data) + + try: + SMSMessage.send_grafana_cloud_sms(user, serializer.validated_data["message"]) + except TwilioRestException as e: + logger.info(f"Sending cloud sms. Twilio exception {str(e)}") + return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE, data=response_data) + except SMSMessage.SMSLimitExceeded: + logger.info(f"Sending cloud sms. 
PhoneCallsLimitExceeded") + return Response(status=status.HTTP_400_BAD_REQUEST, data={"error": "limit-exceeded"}) + + return Response(status=status.HTTP_200_OK, data=response_data) diff --git a/engine/apps/public_api/views/resolution_notes.py b/engine/apps/public_api/views/resolution_notes.py index 16e3fa41..7d07ca1f 100644 --- a/engine/apps/public_api/views/resolution_notes.py +++ b/engine/apps/public_api/views/resolution_notes.py @@ -6,14 +6,13 @@ from rest_framework.viewsets import ModelViewSet from apps.alerts.models import ResolutionNote from apps.alerts.tasks import send_update_resolution_note_signal from apps.auth_token.auth import ApiTokenAuthentication -from apps.public_api import constants as public_api_constants from apps.public_api.serializers.resolution_notes import ResolutionNoteSerializer, ResolutionNoteUpdateSerializer from apps.public_api.throttlers.user_throttle import UserThrottle -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin +from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin from common.api_helpers.paginators import FiftyPageSizePaginator -class ResolutionNoteView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet): +class ResolutionNoteView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) @@ -28,8 +27,6 @@ class ResolutionNoteView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializer pagination_class = FiftyPageSizePaginator - demo_default_id = public_api_constants.DEMO_RESOLUTION_NOTE_ID - def get_queryset(self): alert_group_id = self.request.query_params.get("alert_group_id", None) queryset = ResolutionNote.objects.filter( diff --git a/engine/apps/public_api/views/routes.py b/engine/apps/public_api/views/routes.py index a353a962..c7afa492 100644 --- a/engine/apps/public_api/views/routes.py +++ 
b/engine/apps/public_api/views/routes.py @@ -7,16 +7,15 @@ from rest_framework.viewsets import ModelViewSet from apps.alerts.models import ChannelFilter from apps.auth_token.auth import ApiTokenAuthentication -from apps.public_api import constants as public_api_constants from apps.public_api.serializers import ChannelFilterSerializer, ChannelFilterUpdateSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log from common.api_helpers.exceptions import BadRequest -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin +from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin from common.api_helpers.paginators import TwentyFivePageSizePaginator -class ChannelFilterView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet): +class ChannelFilterView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) @@ -31,8 +30,6 @@ class ChannelFilterView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerM filter_backends = [DjangoFilterBackend] filterset_fields = ["alert_receive_channel"] - demo_default_id = public_api_constants.DEMO_ROUTE_ID_1 - def get_queryset(self): integration_id = self.request.query_params.get("integration_id", None) routing_regex = self.request.query_params.get("routing_regex", None) diff --git a/engine/apps/public_api/views/schedules.py b/engine/apps/public_api/views/schedules.py index 16f6a17a..946463cb 100644 --- a/engine/apps/public_api/views/schedules.py +++ b/engine/apps/public_api/views/schedules.py @@ -7,7 +7,6 @@ from rest_framework.views import Response from rest_framework.viewsets import ModelViewSet from apps.auth_token.auth import ApiTokenAuthentication, ScheduleExportAuthentication -from apps.public_api import constants as 
public_api_constants from apps.public_api.custom_renderers import CalendarRenderer from apps.public_api.serializers import PolymorphicScheduleSerializer, PolymorphicScheduleUpdateSerializer from apps.public_api.throttlers.user_throttle import UserThrottle @@ -16,11 +15,11 @@ from apps.schedules.models import OnCallSchedule from apps.slack.tasks import update_slack_user_group_for_schedules from apps.user_management.organization_log_creator import OrganizationLogType, create_organization_log from common.api_helpers.filters import ByTeamFilter -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, UpdateSerializerMixin +from common.api_helpers.mixins import RateLimitHeadersMixin, UpdateSerializerMixin from common.api_helpers.paginators import FiftyPageSizePaginator -class OnCallScheduleChannelView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSerializerMixin, ModelViewSet): +class OnCallScheduleChannelView(RateLimitHeadersMixin, UpdateSerializerMixin, ModelViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) @@ -32,8 +31,6 @@ class OnCallScheduleChannelView(RateLimitHeadersMixin, DemoTokenMixin, UpdateSer pagination_class = FiftyPageSizePaginator - demo_default_id = public_api_constants.DEMO_SCHEDULE_ID_ICAL - filter_backends = (filters.DjangoFilterBackend,) filterset_class = ByTeamFilter diff --git a/engine/apps/public_api/views/slack_channels.py b/engine/apps/public_api/views/slack_channels.py index f261f0b6..14d53247 100644 --- a/engine/apps/public_api/views/slack_channels.py +++ b/engine/apps/public_api/views/slack_channels.py @@ -6,11 +6,11 @@ from apps.auth_token.auth import ApiTokenAuthentication from apps.public_api.serializers.slack_channel import SlackChannelSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from apps.slack.models import SlackChannel -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin +from common.api_helpers.mixins import 
RateLimitHeadersMixin from common.api_helpers.paginators import FiftyPageSizePaginator -class SlackChannelView(RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, GenericViewSet): +class SlackChannelView(RateLimitHeadersMixin, mixins.ListModelMixin, GenericViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) pagination_class = FiftyPageSizePaginator diff --git a/engine/apps/public_api/views/user_groups.py b/engine/apps/public_api/views/user_groups.py index 4e6bbaf3..2859199d 100644 --- a/engine/apps/public_api/views/user_groups.py +++ b/engine/apps/public_api/views/user_groups.py @@ -6,11 +6,11 @@ from apps.auth_token.auth import ApiTokenAuthentication from apps.public_api.serializers.user_groups import UserGroupSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from apps.slack.models import SlackUserGroup -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin +from common.api_helpers.mixins import RateLimitHeadersMixin from common.api_helpers.paginators import FiftyPageSizePaginator -class UserGroupView(RateLimitHeadersMixin, DemoTokenMixin, mixins.ListModelMixin, GenericViewSet): +class UserGroupView(RateLimitHeadersMixin, mixins.ListModelMixin, GenericViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) pagination_class = FiftyPageSizePaginator diff --git a/engine/apps/public_api/views/users.py b/engine/apps/public_api/views/users.py index 815c6553..54439d6e 100644 --- a/engine/apps/public_api/views/users.py +++ b/engine/apps/public_api/views/users.py @@ -5,19 +5,18 @@ from rest_framework.views import Response from rest_framework.viewsets import ReadOnlyModelViewSet from apps.auth_token.auth import ApiTokenAuthentication, UserScheduleExportAuthentication -from apps.public_api import constants as public_api_constants from apps.public_api.custom_renderers import CalendarRenderer from apps.public_api.serializers 
import FastUserSerializer, UserSerializer from apps.public_api.throttlers.user_throttle import UserThrottle from apps.schedules.ical_utils import user_ical_export from apps.schedules.models import OnCallSchedule from apps.user_management.models import User -from common.api_helpers.mixins import DemoTokenMixin, RateLimitHeadersMixin, ShortSerializerMixin +from common.api_helpers.mixins import RateLimitHeadersMixin, ShortSerializerMixin from common.api_helpers.paginators import HundredPageSizePaginator from common.constants.role import Role -class UserView(RateLimitHeadersMixin, ShortSerializerMixin, DemoTokenMixin, ReadOnlyModelViewSet): +class UserView(RateLimitHeadersMixin, ShortSerializerMixin, ReadOnlyModelViewSet): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) @@ -29,16 +28,18 @@ class UserView(RateLimitHeadersMixin, ShortSerializerMixin, DemoTokenMixin, Read throttle_classes = [UserThrottle] - demo_default_id = public_api_constants.DEMO_USER_ID - def get_queryset(self): username = self.request.query_params.get("username") + email = self.request.query_params.get("email") is_short_request = self.request.query_params.get("short", "false") == "true" queryset = self.request.auth.organization.users.filter(role__in=[Role.ADMIN, Role.EDITOR]).distinct() if username is not None: queryset = queryset.filter(username=username) + if email is not None: + queryset = queryset.filter(email=email) + if not is_short_request: queryset = self.serializer_class.setup_eager_loading(queryset) return queryset.order_by("id") diff --git a/engine/apps/schedules/ical_utils.py b/engine/apps/schedules/ical_utils.py index 309729cd..cb0bc342 100644 --- a/engine/apps/schedules/ical_utils.py +++ b/engine/apps/schedules/ical_utils.py @@ -26,14 +26,18 @@ if TYPE_CHECKING: from apps.user_management.models import User -def users_in_ical(usernames_from_ical, organization): +def users_in_ical(usernames_from_ical, organization, include_viewers=False): """ 
Parse ical file and return list of users found """ # Only grafana username will be used, consider adding grafana email and id - users_found_in_ical = organization.users.filter( - Q(role__in=(Role.ADMIN, Role.EDITOR)) & (Q(username__in=usernames_from_ical) | Q(email__in=usernames_from_ical)) + users_found_in_ical = organization.users + if not include_viewers: + users_found_in_ical = users_found_in_ical.filter(role__in=(Role.ADMIN, Role.EDITOR)) + + users_found_in_ical = users_found_in_ical.filter( + (Q(username__in=usernames_from_ical) | Q(email__in=usernames_from_ical)) ).distinct() # Here is the example how we extracted users previously, using slack fields too @@ -260,15 +264,17 @@ def list_of_empty_shifts_in_schedule(schedule, start_date, end_date): return sorted(empty_shifts, key=lambda dt: dt.start) -def list_users_to_notify_from_ical(schedule, events_datetime=None): +def list_users_to_notify_from_ical(schedule, events_datetime=None, include_viewers=False): """ Retrieve on-call users for the current time """ events_datetime = events_datetime if events_datetime else timezone.datetime.now(timezone.utc) - return list_users_to_notify_from_ical_for_period(schedule, events_datetime, events_datetime) + return list_users_to_notify_from_ical_for_period( + schedule, events_datetime, events_datetime, include_viewers=include_viewers + ) -def list_users_to_notify_from_ical_for_period(schedule, start_datetime, end_datetime): +def list_users_to_notify_from_ical_for_period(schedule, start_datetime, end_datetime, include_viewers=False): # get list of iCalendars from current iCal files. If there is more than one calendar, primary calendar will always # be the first calendars = schedule.get_icalendars() @@ -286,7 +292,7 @@ def list_users_to_notify_from_ical_for_period(schedule, start_datetime, end_date parsed_ical_events.setdefault(current_priority, []).extend(current_usernames) # find users by usernames. 
if users are not found for shift, get users from lower priority for _, usernames in sorted(parsed_ical_events.items(), reverse=True): - users_found_in_ical = users_in_ical(usernames, schedule.organization) + users_found_in_ical = users_in_ical(usernames, schedule.organization, include_viewers=include_viewers) if users_found_in_ical: break if users_found_in_ical: diff --git a/engine/apps/schedules/tasks/notify_about_empty_shifts_in_schedule.py b/engine/apps/schedules/tasks/notify_about_empty_shifts_in_schedule.py index 5d681bb7..82d96a7e 100644 --- a/engine/apps/schedules/tasks/notify_about_empty_shifts_in_schedule.py +++ b/engine/apps/schedules/tasks/notify_about_empty_shifts_in_schedule.py @@ -4,7 +4,6 @@ from django.apps import apps from django.core.cache import cache from django.utils import timezone -from apps.public_api.constants import DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL from apps.schedules.ical_utils import list_of_empty_shifts_in_schedule from apps.slack.utils import format_datetime_to_slack, post_message_to_channel from common.custom_celery_tasks import shared_dedicated_queue_retry_task @@ -19,9 +18,7 @@ def start_check_empty_shifts_in_schedule(): task_logger.info("Start start_notify_about_empty_shifts_in_schedule") - schedules = OnCallSchedule.objects.exclude( - public_primary_key__in=(DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL) - ) + schedules = OnCallSchedule.objects.all() for schedule in schedules: check_empty_shifts_in_schedule.apply_async((schedule.pk,)) @@ -58,7 +55,7 @@ def start_notify_about_empty_shifts_in_schedule(): schedules = OnCallSchedule.objects.filter( empty_shifts_report_sent_at__lte=week_ago, channel__isnull=False, - ).exclude(public_primary_key__in=(DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL)) + ) for schedule in schedules: notify_about_empty_shifts_in_schedule.apply_async((schedule.pk,)) diff --git a/engine/apps/schedules/tasks/notify_about_gaps_in_schedule.py 
b/engine/apps/schedules/tasks/notify_about_gaps_in_schedule.py index 4a4749f6..76d8bfd8 100644 --- a/engine/apps/schedules/tasks/notify_about_gaps_in_schedule.py +++ b/engine/apps/schedules/tasks/notify_about_gaps_in_schedule.py @@ -4,7 +4,6 @@ from django.apps import apps from django.core.cache import cache from django.utils import timezone -from apps.public_api.constants import DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL from apps.schedules.ical_utils import list_of_gaps_in_schedule from apps.slack.utils import format_datetime_to_slack, post_message_to_channel from common.custom_celery_tasks import shared_dedicated_queue_retry_task @@ -18,9 +17,7 @@ def start_check_gaps_in_schedule(): task_logger.info("Start start_check_gaps_in_schedule") - schedules = OnCallSchedule.objects.exclude( - public_primary_key__in=(DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL) - ) + schedules = OnCallSchedule.objects.all() for schedule in schedules: check_gaps_in_schedule.apply_async((schedule.pk,)) @@ -57,7 +54,7 @@ def start_notify_about_gaps_in_schedule(): schedules = OnCallSchedule.objects.filter( gaps_report_sent_at__lte=week_ago, channel__isnull=False, - ).exclude(public_primary_key__in=(DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL)) + ) for schedule in schedules: notify_about_gaps_in_schedule.apply_async((schedule.pk,)) diff --git a/engine/apps/schedules/tasks/refresh_ical_files.py b/engine/apps/schedules/tasks/refresh_ical_files.py index 083e198f..5e446b8c 100644 --- a/engine/apps/schedules/tasks/refresh_ical_files.py +++ b/engine/apps/schedules/tasks/refresh_ical_files.py @@ -2,7 +2,6 @@ from celery.utils.log import get_task_logger from django.apps import apps from apps.alerts.tasks import notify_ical_schedule_shift -from apps.public_api.constants import DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL from apps.schedules.ical_utils import is_icals_equal from apps.schedules.tasks import notify_about_empty_shifts_in_schedule, notify_about_gaps_in_schedule from 
apps.slack.tasks import start_update_slack_user_group_for_schedules @@ -17,9 +16,7 @@ def start_refresh_ical_files(): task_logger.info("Start refresh ical files") - schedules = OnCallSchedule.objects.all().exclude( - public_primary_key__in=(DEMO_SCHEDULE_ID_CALENDAR, DEMO_SCHEDULE_ID_ICAL) - ) + schedules = OnCallSchedule.objects.all() for schedule in schedules: refresh_ical_file.apply_async((schedule.pk,)) diff --git a/engine/apps/schedules/tests/test_ical_utils.py b/engine/apps/schedules/tests/test_ical_utils.py new file mode 100644 index 00000000..8032334d --- /dev/null +++ b/engine/apps/schedules/tests/test_ical_utils.py @@ -0,0 +1,60 @@ +import pytest +from django.utils import timezone + +from apps.schedules.ical_utils import list_users_to_notify_from_ical, users_in_ical +from apps.schedules.models import CustomOnCallShift, OnCallScheduleCalendar +from common.constants.role import Role + + +@pytest.mark.django_db +@pytest.mark.parametrize( + "include_viewers", + [True, False], +) +def test_users_in_ical_viewers_inclusion(make_organization_and_user, make_user_for_organization, include_viewers): + organization, user = make_organization_and_user() + viewer = make_user_for_organization(organization, Role.VIEWER) + + usernames = [user.username, viewer.username] + result = users_in_ical(usernames, organization, include_viewers=include_viewers) + if include_viewers: + assert set(result) == {user, viewer} + else: + assert set(result) == {user} + + +@pytest.mark.django_db +@pytest.mark.parametrize( + "include_viewers", + [True, False], +) +def test_list_users_to_notify_from_ical_viewers_inclusion( + make_organization_and_user, make_user_for_organization, make_schedule, make_on_call_shift, include_viewers +): + organization, user = make_organization_and_user() + viewer = make_user_for_organization(organization, Role.VIEWER) + + schedule = make_schedule(organization, schedule_class=OnCallScheduleCalendar) + date = timezone.now().replace(tzinfo=None, microsecond=0) + data 
= { + "priority_level": 1, + "start": date, + "duration": timezone.timedelta(seconds=10800), + } + on_call_shift = make_on_call_shift( + organization=organization, shift_type=CustomOnCallShift.TYPE_SINGLE_EVENT, **data + ) + on_call_shift.users.add(user) + on_call_shift.users.add(viewer) + schedule.custom_on_call_shifts.add(on_call_shift) + + # get users on-call + date = date + timezone.timedelta(minutes=5) + users_on_call = list_users_to_notify_from_ical(schedule, date, include_viewers=include_viewers) + + if include_viewers: + assert len(users_on_call) == 2 + assert set(users_on_call) == {user, viewer} + else: + assert len(users_on_call) == 1 + assert set(users_on_call) == {user} diff --git a/engine/apps/slack/migrations/0003_squashed_create_demo_token_instances.py b/engine/apps/slack/migrations/0003_squashed_create_demo_token_instances.py deleted file mode 100644 index ae3368f1..00000000 --- a/engine/apps/slack/migrations/0003_squashed_create_demo_token_instances.py +++ /dev/null @@ -1,47 +0,0 @@ -# Generated by Django 3.2.5 on 2021-08-04 10:51 - -import sys -from django.db import migrations -from apps.public_api import constants as public_api_constants - - -def create_demo_token_instances(apps, schema_editor): - if not (len(sys.argv) > 1 and sys.argv[1] == 'test'): - SlackUserIdentity = apps.get_model('slack', 'SlackUserIdentity') - SlackTeamIdentity = apps.get_model('slack', 'SlackTeamIdentity') - SlackChannel = apps.get_model('slack', 'SlackChannel') - SlackUserGroup = apps.get_model("slack", "SlackUserGroup") - - slack_team_identity, _ = SlackTeamIdentity.objects.get_or_create( - slack_id=public_api_constants.DEMO_SLACK_TEAM_ID, - ) - SlackUserIdentity.objects.get_or_create( - slack_id=public_api_constants.DEMO_SLACK_USER_ID, - slack_team_identity=slack_team_identity, - ) - - SlackChannel.objects.get_or_create( - name=public_api_constants.DEMO_SLACK_CHANNEL_NAME, - slack_id=public_api_constants.DEMO_SLACK_CHANNEL_SLACK_ID, - 
slack_team_identity=slack_team_identity, - ) - - SlackUserGroup.objects.get_or_create( - slack_team_identity=slack_team_identity, - slack_id=public_api_constants.DEMO_SLACK_USER_GROUP_SLACK_ID, - public_primary_key=public_api_constants.DEMO_SLACK_USER_GROUP_ID, - name=public_api_constants.DEMO_SLACK_USER_GROUP_NAME, - handle=public_api_constants.DEMO_SLACK_USER_GROUP_HANDLE, - is_active=True, - ) - - -class Migration(migrations.Migration): - - dependencies = [ - ('slack', '0002_squashed_initial'), - ] - - operations = [ - migrations.RunPython(create_demo_token_instances, migrations.RunPython.noop) - ] diff --git a/engine/apps/slack/scenarios/alertgroup_appearance.py b/engine/apps/slack/scenarios/alertgroup_appearance.py index edf0a704..1ccba05f 100644 --- a/engine/apps/slack/scenarios/alertgroup_appearance.py +++ b/engine/apps/slack/scenarios/alertgroup_appearance.py @@ -56,7 +56,11 @@ class OpenAlertAppearanceDialogStep( raw_request_data = json.dumps(alert_group.alerts.first().raw_request_data, sort_keys=True, indent=4) # This is a special case for amazon sns notifications in str format CHEKED - if alert_group.channel.integration == AlertReceiveChannel.INTEGRATION_AMAZON_SNS and raw_request_data == "{}": + if ( + AlertReceiveChannel.INTEGRATION_AMAZON_SNS is not None + and alert_group.channel.integration == AlertReceiveChannel.INTEGRATION_AMAZON_SNS + and raw_request_data == "{}" + ): raw_request_data = alert_group.alerts.first().message raw_request_data_chunks = [ diff --git a/engine/apps/slack/tasks.py b/engine/apps/slack/tasks.py index 48c688be..e2c250a0 100644 --- a/engine/apps/slack/tasks.py +++ b/engine/apps/slack/tasks.py @@ -9,8 +9,6 @@ from django.core.cache import cache from django.utils import timezone from apps.alerts.tasks.compare_escalations import compare_escalations -from apps.public_api import constants as public_constants -from apps.public_api.constants import DEMO_SLACK_USER_GROUP_ID from apps.slack.constants import 
CACHE_UPDATE_INCIDENT_SLACK_MESSAGE_LIFETIME, SLACK_BOT_ID from apps.slack.scenarios.escalation_delivery import EscalationDeliveryStep from apps.slack.scenarios.scenario_step import ScenarioStep @@ -499,7 +497,7 @@ def populate_slack_usergroups(): slack_team_identities = SlackTeamIdentity.objects.filter( detected_token_revoked__isnull=True, - ).exclude(slack_id=public_constants.DEMO_SLACK_TEAM_ID) + ) delay = 0 counter = 0 @@ -642,10 +640,7 @@ def start_update_slack_user_group_for_schedules(): SlackUserGroup = apps.get_model("slack", "SlackUserGroup") user_group_pks = ( - SlackUserGroup.objects.exclude(public_primary_key=DEMO_SLACK_USER_GROUP_ID) - .filter(oncall_schedules__isnull=False) - .distinct() - .values_list("pk", flat=True) + SlackUserGroup.objects.filter(oncall_schedules__isnull=False).distinct().values_list("pk", flat=True) ) for user_group_pk in user_group_pks: @@ -673,7 +668,7 @@ def populate_slack_channels(): slack_team_identities = SlackTeamIdentity.objects.filter( detected_token_revoked__isnull=True, - ).exclude(slack_id=public_constants.DEMO_SLACK_TEAM_ID) + ) delay = 0 counter = 0 diff --git a/engine/apps/slack/views.py b/engine/apps/slack/views.py index 8988594c..5990b0b6 100644 --- a/engine/apps/slack/views.py +++ b/engine/apps/slack/views.py @@ -119,6 +119,9 @@ class SlackEventApiEndpointView(APIView): return Response(status=403) if not settings.DEBUG: + if live_settings.SLACK_SIGNING_SECRET is None and settings.SLACK_SIGNING_SECRET_LIVE: + raise Exception("Please specify SLACK_SIGNING_SECRET or use DEBUG.") + if not ( SlackEventApiEndpointView.verify_signature( slack_request_timestamp, slack_signature, body, live_settings.SLACK_SIGNING_SECRET diff --git a/engine/apps/social_auth/live_setting_django_strategy.py b/engine/apps/social_auth/live_setting_django_strategy.py index dd913e67..d2cf0fe1 100644 --- a/engine/apps/social_auth/live_setting_django_strategy.py +++ b/engine/apps/social_auth/live_setting_django_strategy.py @@ -34,8 +34,10 @@ 
class LiveSettingDjangoStrategy(DjangoStrategy): def build_absolute_uri(self, path=None): """ - Overriden DjangoStrategy's method to substitute and force the host value from ENV + Overridden DjangoStrategy's method to substitute and force the host value from ENV """ + if live_settings.SLACK_INSTALL_RETURN_REDIRECT_HOST is not None and path is not None: + return live_settings.SLACK_INSTALL_RETURN_REDIRECT_HOST + path if settings.SLACK_INSTALL_RETURN_REDIRECT_HOST is not None and path is not None: return settings.SLACK_INSTALL_RETURN_REDIRECT_HOST + path if self.request: diff --git a/engine/apps/telegram/client.py b/engine/apps/telegram/client.py index 280e26c5..4de25de1 100644 --- a/engine/apps/telegram/client.py +++ b/engine/apps/telegram/client.py @@ -1,6 +1,6 @@ from typing import Optional, Tuple, Union +from urllib.parse import urljoin -from django.conf import settings from telegram import Bot, InlineKeyboardMarkup, Message, ParseMode from telegram.error import InvalidToken, Unauthorized from telegram.utils.request import Request @@ -34,7 +34,10 @@ class TelegramClient: return False def register_webhook(self, webhook_url: Optional[str] = None) -> None: - webhook_url = webhook_url or settings.TELEGRAM_WEBHOOK_URL + webhook_url = webhook_url or urljoin(live_settings.TELEGRAM_WEBHOOK_HOST, "/telegram/") + + if webhook_url is None: + webhook_url = live_settings.TELEGRAM_WEBHOOK_URL webhook_info = self.api_client.get_webhook_info() if webhook_info.url == webhook_url: diff --git a/engine/apps/twilioapp/migrations/0002_auto_20220604_1008.py b/engine/apps/twilioapp/migrations/0002_auto_20220604_1008.py new file mode 100644 index 00000000..cddd898c --- /dev/null +++ b/engine/apps/twilioapp/migrations/0002_auto_20220604_1008.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.5 on 2022-06-04 10:08 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('twilioapp', '0001_squashed_initial'), + ] + + operations = [ + 
migrations.AddField( + model_name='phonecall', + name='grafana_cloud_notification', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='smsmessage', + name='grafana_cloud_notification', + field=models.BooleanField(default=False), + ), + ] diff --git a/engine/apps/twilioapp/models/phone_call.py b/engine/apps/twilioapp/models/phone_call.py index 7d5ae0f9..64b4304e 100644 --- a/engine/apps/twilioapp/models/phone_call.py +++ b/engine/apps/twilioapp/models/phone_call.py @@ -1,14 +1,20 @@ import logging +from urllib.parse import urljoin +import requests from django.apps import apps +from django.conf import settings from django.db import models +from rest_framework import status from twilio.base.exceptions import TwilioRestException from apps.alerts.constants import ActionSource from apps.alerts.incident_appearance.renderers.phone_call_renderer import AlertGroupPhoneCallRenderer from apps.alerts.signals import user_notification_action_triggered_signal +from apps.base.utils import live_settings from apps.twilioapp.constants import TwilioCallStatuses from apps.twilioapp.twilio_client import twilio_client +from common.utils import clean_markup, escape_for_twilio_phone_call logger = logging.getLogger(__name__) @@ -34,8 +40,10 @@ class PhoneCallManager(models.Manager): if phone_call_qs.exists() and status: phone_call_qs.update(status=status) - phone_call = phone_call_qs.first() + if phone_call.grafana_cloud_notification: + # If call was made via grafana twilio it is don't needed to create logs on it's delivery status. 
+ return log_record = None if status == TwilioCallStatuses.COMPLETED: log_record = UserNotificationPolicyLogRecord( @@ -115,6 +123,17 @@ class PhoneCall(models.Model): created_at = models.DateTimeField(auto_now_add=True) + grafana_cloud_notification = models.BooleanField(default=False) + + class PhoneCallsLimitExceeded(Exception): + """Phone calls limit exceeded""" + + class PhoneNumberNotVerifiedError(Exception): + """Phone number is not verified""" + + class CloudSendError(Exception): + """Error making call through cloud""" + def process_digit(self, digit): """The function process pressed digit at time of call to user @@ -138,57 +157,59 @@ class PhoneCall(models.Model): return bool(self.represents_alert_group.slack_message) @classmethod - def make_call(cls, user, alert_group, notification_policy): - UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord") - - organization = alert_group.channel.organization - - log_record = None - if user.verified_phone_number: - # Create a PhoneCall object in db - phone_call = PhoneCall( - represents_alert_group=alert_group, - receiver=user, - notification_policy=notification_policy, - ) - - phone_calls_left = organization.phone_calls_left(user) - - if phone_calls_left > 0: - phone_call.exceeded_limit = False - renderer = AlertGroupPhoneCallRenderer(alert_group) - message_body = renderer.render() - if phone_calls_left < 3: - message_body += " {} phone calls left. 
Contact your admin.".format(phone_calls_left) - try: - twilio_call = twilio_client.make_call(message_body, user.verified_phone_number) - except TwilioRestException: - log_record = UserNotificationPolicyLogRecord( - author=user, - type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, - notification_policy=notification_policy, - alert_group=alert_group, - notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ABLE_TO_CALL, - notification_step=notification_policy.step if notification_policy else None, - notification_channel=notification_policy.notify_by if notification_policy else None, - ) - else: - if twilio_call.status and twilio_call.sid: - phone_call.status = TwilioCallStatuses.DETERMINANT.get(twilio_call.status, None) - phone_call.sid = twilio_call.sid - else: - log_record = UserNotificationPolicyLogRecord( - author=user, - type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, - notification_policy=notification_policy, - alert_group=alert_group, - notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALLS_LIMIT_EXCEEDED, - notification_step=notification_policy.step if notification_policy else None, - notification_channel=notification_policy.notify_by if notification_policy else None, - ) - phone_call.exceeded_limit = True - phone_call.save() + def _make_cloud_call(cls, user, message_body): + url = urljoin(settings.GRAFANA_CLOUD_ONCALL_API_URL, "api/v1/make_call") + auth = {"Authorization": live_settings.GRAFANA_CLOUD_ONCALL_TOKEN} + data = { + "email": user.email, + "message": message_body, + } + try: + response = requests.post(url, headers=auth, data=data, timeout=5) + except requests.exceptions.RequestException as e: + logger.warning(f"Unable to make call through cloud. 
Request exception {str(e)}") + raise PhoneCall.CloudSendError("Unable to make call through cloud: request failed") + if response.status_code == status.HTTP_200_OK: + logger.info("Make cloud call successfully") + if response.status_code == status.HTTP_400_BAD_REQUEST and response.json().get("error") == "limit-exceeded": + raise PhoneCall.PhoneCallsLimitExceeded("Organization calls limit exceeded") + elif response.status_code == status.HTTP_404_NOT_FOUND: + raise PhoneCall.CloudSendError("Unable to make call through cloud: user not found") else: + raise PhoneCall.CloudSendError("Unable to make call through cloud: server error") + + @classmethod + def make_call(cls, user, alert_group, notification_policy, is_cloud_notification=False): + UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord") + log_record = None + renderer = AlertGroupPhoneCallRenderer(alert_group) + message_body = renderer.render() + try: + if is_cloud_notification: + cls._make_cloud_call(user, message_body) + else: + cls._make_call(user, message_body, alert_group=alert_group, notification_policy=notification_policy) + except (TwilioRestException, PhoneCall.CloudSendError): + log_record = UserNotificationPolicyLogRecord( + author=user, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, + notification_policy=notification_policy, + alert_group=alert_group, + notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ABLE_TO_CALL, + notification_step=notification_policy.step if notification_policy else None, + notification_channel=notification_policy.notify_by if notification_policy else None, + ) + except PhoneCall.PhoneCallsLimitExceeded: + log_record = UserNotificationPolicyLogRecord( + author=user, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, + notification_policy=notification_policy, + alert_group=alert_group, + 
notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_PHONE_CALLS_LIMIT_EXCEEDED, + notification_step=notification_policy.step if notification_policy else None, + notification_channel=notification_policy.notify_by if notification_policy else None, + ) + except PhoneCall.PhoneNumberNotVerifiedError: log_record = UserNotificationPolicyLogRecord( author=user, type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, @@ -203,6 +224,41 @@ class PhoneCall(models.Model): log_record.save() user_notification_action_triggered_signal.send(sender=PhoneCall.make_call, log_record=log_record) + @classmethod + def make_grafana_cloud_call(cls, user, message_body): + message_body = escape_for_twilio_phone_call(clean_markup(message_body)) + cls._make_call(user, message_body, grafana_cloud=True) + + @classmethod + def _make_call(cls, user, message_body, alert_group=None, notification_policy=None, grafana_cloud=False): + if not user.verified_phone_number: + raise PhoneCall.PhoneNumberNotVerifiedError("User phone number is not verified") + + phone_call = PhoneCall( + represents_alert_group=alert_group, + receiver=user, + notification_policy=notification_policy, + grafana_cloud_notification=grafana_cloud, + ) + phone_calls_left = user.organization.phone_calls_left(user) + + if phone_calls_left <= 0: + phone_call.exceeded_limit = True + phone_call.save() + raise PhoneCall.PhoneCallsLimitExceeded("Organization calls limit exceeded") + + phone_call.exceeded_limit = False + if phone_calls_left < 3: + message_body += " {} phone calls left. 
Contact your admin.".format(phone_calls_left) + + twilio_call = twilio_client.make_call(message_body, user.verified_phone_number) + if twilio_call.status and twilio_call.sid: + phone_call.status = TwilioCallStatuses.DETERMINANT.get(twilio_call.status, None) + phone_call.sid = twilio_call.sid + phone_call.save() + + return phone_call + @staticmethod def get_error_code_by_twilio_status(status): UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord") diff --git a/engine/apps/twilioapp/models/sms_message.py b/engine/apps/twilioapp/models/sms_message.py index 09404e56..00e98e4b 100644 --- a/engine/apps/twilioapp/models/sms_message.py +++ b/engine/apps/twilioapp/models/sms_message.py @@ -1,13 +1,19 @@ import logging +from urllib.parse import urljoin +import requests from django.apps import apps +from django.conf import settings from django.db import models +from rest_framework import status from twilio.base.exceptions import TwilioRestException from apps.alerts.incident_appearance.renderers.sms_renderer import AlertGroupSmsRenderer from apps.alerts.signals import user_notification_action_triggered_signal +from apps.base.utils import live_settings from apps.twilioapp.constants import TwilioMessageStatuses from apps.twilioapp.twilio_client import twilio_client +from common.utils import clean_markup logger = logging.getLogger(__name__) @@ -36,7 +42,9 @@ class SMSMessageManager(models.Manager): sms_message_qs.update(status=status) sms_message = sms_message_qs.first() - + if sms_message.grafana_cloud_notification: + # If sms was sent via grafana cloud notifications don't create logs on its delivery status. 
+ return log_record = None if status == TwilioMessageStatuses.DELIVERED: @@ -90,6 +98,7 @@ class SMSMessage(models.Model): null=True, choices=TwilioMessageStatuses.CHOICES, ) + grafana_cloud_notification = models.BooleanField(default=False) # https://www.twilio.com/docs/sms/api/message-resource#message-properties sid = models.CharField( @@ -99,66 +108,77 @@ class SMSMessage(models.Model): created_at = models.DateTimeField(auto_now_add=True) + class SMSLimitExceeded(Exception): + """SMS limit exceeded""" + + class PhoneNumberNotVerifiedError(Exception): + """Phone number is not verified""" + + class CloudSendError(Exception): + """SMS sending through cloud error""" + @property def created_for_slack(self): return bool(self.represents_alert_group.slack_message) @classmethod - def send_sms(cls, user, alert_group, notification_policy): + def _send_cloud_sms(cls, user, message_body): + url = urljoin(settings.GRAFANA_CLOUD_ONCALL_API_URL, "api/v1/send_sms") + auth = {"Authorization": live_settings.GRAFANA_CLOUD_ONCALL_TOKEN} + data = { + "email": user.email, + "message": message_body, + } + try: + response = requests.post(url, headers=auth, data=data, timeout=5) + except requests.exceptions.RequestException as e: + logger.warning(f"Unable to send SMS through cloud. 
Request exception {str(e)}") + raise SMSMessage.CloudSendError("Unable to send SMS through cloud: request failed") + if response.status_code == status.HTTP_200_OK: + logger.info("Sent cloud sms successfully") + elif response.status_code == status.HTTP_400_BAD_REQUEST and response.json().get("error") == "limit-exceeded": + raise SMSMessage.SMSLimitExceeded("Organization sms limit exceeded") + elif response.status_code == status.HTTP_404_NOT_FOUND: + raise SMSMessage.CloudSendError("Unable to send SMS through cloud: user not found") + else: + raise SMSMessage.CloudSendError("Unable to send SMS through cloud: server error") + + @classmethod + def send_sms(cls, user, alert_group, notification_policy, is_cloud_notification=False): UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord") - organization = alert_group.channel.organization - log_record = None - if user.verified_phone_number: - # Create an SMS object in db - sms_message = SMSMessage( - represents_alert_group=alert_group, receiver=user, notification_policy=notification_policy - ) - - sms_left = organization.sms_left(user) - if sms_left > 0: - # Mark is as successfully sent - sms_message.exceeded_limit = False - # Render alert message for sms - renderer = AlertGroupSmsRenderer(alert_group) - message_body = renderer.render() - # Notify if close to limit - if sms_left < 3: - message_body += " {} sms left. 
Contact your admin.".format(sms_left) - # Send an sms - try: - twilio_message = twilio_client.send_message(message_body, user.verified_phone_number) - except TwilioRestException: - log_record = UserNotificationPolicyLogRecord( - author=user, - type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, - notification_policy=notification_policy, - alert_group=alert_group, - notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ABLE_TO_SEND_SMS, - notification_step=notification_policy.step if notification_policy else None, - notification_channel=notification_policy.notify_by if notification_policy else None, - ) - else: - if twilio_message.status and twilio_message.sid: - sms_message.status = TwilioMessageStatuses.DETERMINANT.get(twilio_message.status, None) - sms_message.sid = twilio_message.sid + renderer = AlertGroupSmsRenderer(alert_group) + message_body = renderer.render() + try: + if is_cloud_notification: + cls._send_cloud_sms(user, message_body) else: - # If no more sms left, mark as exceeded limit - log_record = UserNotificationPolicyLogRecord( - author=user, - type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, - notification_policy=notification_policy, - alert_group=alert_group, - notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_SMS_LIMIT_EXCEEDED, - notification_step=notification_policy.step if notification_policy else None, - notification_channel=notification_policy.notify_by if notification_policy else None, - ) - sms_message.exceeded_limit = True - - # Save object - sms_message.save() - else: + cls._send_sms(user, message_body, alert_group=alert_group, notification_policy=notification_policy) + except (TwilioRestException, SMSMessage.CloudSendError) as e: + logger.warning(f"Unable to send sms. 
Exception {e}") + log_record = UserNotificationPolicyLogRecord( + author=user, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, + notification_policy=notification_policy, + alert_group=alert_group, + notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_NOT_ABLE_TO_SEND_SMS, + notification_step=notification_policy.step if notification_policy else None, + notification_channel=notification_policy.notify_by if notification_policy else None, + ) + except SMSMessage.SMSLimitExceeded as e: + logger.warning(f"Unable to send sms. Exception {e}") + log_record = UserNotificationPolicyLogRecord( + author=user, + type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, + notification_policy=notification_policy, + alert_group=alert_group, + notification_error_code=UserNotificationPolicyLogRecord.ERROR_NOTIFICATION_SMS_LIMIT_EXCEEDED, + notification_step=notification_policy.step if notification_policy else None, + notification_channel=notification_policy.notify_by if notification_policy else None, + ) + except SMSMessage.PhoneNumberNotVerifiedError as e: + logger.warning(f"Unable to send sms. 
Exception {e}") log_record = UserNotificationPolicyLogRecord( author=user, type=UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED, @@ -173,6 +193,41 @@ class SMSMessage(models.Model): log_record.save() user_notification_action_triggered_signal.send(sender=SMSMessage.send_sms, log_record=log_record) + @classmethod + def send_grafana_cloud_sms(cls, user, message_body): + message_body = clean_markup(message_body) + cls._send_sms(user, message_body, grafana_cloud=True) + + @classmethod + def _send_sms(cls, user, message_body, alert_group=None, notification_policy=None, grafana_cloud=False): + if not user.verified_phone_number: + raise SMSMessage.PhoneNumberNotVerifiedError("User phone number is not verified") + + sms_message = SMSMessage( + represents_alert_group=alert_group, + receiver=user, + notification_policy=notification_policy, + grafana_cloud_notification=grafana_cloud, + ) + sms_left = user.organization.sms_left(user) + + if sms_left <= 0: + sms_message.exceeded_limit = True + sms_message.save() + raise SMSMessage.SMSLimitExceeded("Organization sms limit exceeded") + + sms_message.exceeded_limit = False + if sms_left < 3: + message_body += " {} sms left. 
Contact your admin.".format(sms_left) + + twilio_message = twilio_client.send_message(message_body, user.verified_phone_number) + if twilio_message.status and twilio_message.sid: + sms_message.status = TwilioMessageStatuses.DETERMINANT.get(twilio_message.status, None) + sms_message.sid = twilio_message.sid + sms_message.save() + + return sms_message + @staticmethod def get_error_code_by_twilio_status(status): UserNotificationPolicyLogRecord = apps.get_model("base", "UserNotificationPolicyLogRecord") diff --git a/engine/apps/user_management/migrations/0002_squashed_create_demo_token_instances.py b/engine/apps/user_management/migrations/0002_squashed_create_demo_token_instances.py deleted file mode 100644 index 8b8f932c..00000000 --- a/engine/apps/user_management/migrations/0002_squashed_create_demo_token_instances.py +++ /dev/null @@ -1,51 +0,0 @@ -# Generated by Django 3.2.5 on 2021-08-04 10:46 - -import sys -from django.db import migrations -from apps.public_api import constants as public_api_constants -from common.constants.role import Role - - -def create_demo_token_instances(apps, schema_editor): - if not (len(sys.argv) > 1 and sys.argv[1] == 'test'): - SlackUserIdentity = apps.get_model('slack', 'SlackUserIdentity') - SlackTeamIdentity = apps.get_model('slack', 'SlackTeamIdentity') - User = apps.get_model('user_management', 'User') - Organization = apps.get_model('user_management', 'Organization') - - slack_team_identity = SlackTeamIdentity.objects.get(slack_id=public_api_constants.DEMO_SLACK_TEAM_ID) - slack_user_identity = SlackUserIdentity.objects.get( - slack_id=public_api_constants.DEMO_SLACK_USER_ID, - slack_team_identity=slack_team_identity, - ) - - organization, _ = Organization.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_ORGANIZATION_ID, - defaults=dict( - slack_team_identity=slack_team_identity, - org_id=0, stack_id=0, - ) - ) - User.objects.get_or_create( - public_primary_key=public_api_constants.DEMO_USER_ID, - 
defaults=dict( - username=public_api_constants.DEMO_USER_USERNAME, - email=public_api_constants.DEMO_USER_EMAIL, - organization=organization, - role=Role.ADMIN, - slack_user_identity=slack_user_identity, - user_id=0, - ) - ) - - -class Migration(migrations.Migration): - - dependencies = [ - ('user_management', '0001_squashed_initial'), - ('slack', '0003_squashed_create_demo_token_instances'), - ] - - operations = [ - migrations.RunPython(create_demo_token_instances, migrations.RunPython.noop) - ] diff --git a/engine/common/api_helpers/mixins.py b/engine/common/api_helpers/mixins.py index d121d2fd..503a477e 100644 --- a/engine/common/api_helpers/mixins.py +++ b/engine/common/api_helpers/mixins.py @@ -19,7 +19,6 @@ from apps.alerts.incident_appearance.templaters import ( TemplateLoader, ) from apps.base.messaging import get_messaging_backends -from apps.public_api.helpers import is_demo_token_request from common.api_helpers.exceptions import BadRequest from common.jinja_templater import apply_jinja_template @@ -125,83 +124,6 @@ class EagerLoadingMixin: return queryset -class DemoTokenMixin: - """ - The view mixin for requests to public api with demo token authorization. 
- """ - - def dispatch(self, request, *args, **kwargs): - """ - Overridden dispatch method of APIView - https://github.com/encode/django-rest-framework/blob/master/rest_framework/views.py#L485 - """ - method = request.method.lower() - - if is_demo_token_request(request) and method in ["post", "put", "delete"]: - self.args = args - self.kwargs = kwargs - request = self.initialize_request(request, *args, **kwargs) - self.request = request - - # there is a strange comment about this - # https://github.com/encode/django-rest-framework/blob/master/rest_framework/views.py#L494 - self.headers = self.default_response_headers - - try: - self.initial(request, *args, **kwargs) - - """ - check for allowed request methods - - from APIView: - If `request.method` does not correspond to a handler method, - determine what kind of exception to raise. - - def http_method_not_allowed(self, request, *args, **kwargs): - raise exceptions.MethodNotAllowed(request.method) - """ - - if method in self.http_method_names: - handler = getattr(self, method, self.http_method_not_allowed) - else: - handler = self.http_method_not_allowed - - # function comparison explanation - # https://stackoverflow.com/a/18217024 - if handler == self.http_method_not_allowed: - response = handler(request, *args, **kwargs) - - elif method == "post": - # It excludes a real instance creation. - # It returns the instance with public primary key - # is equal to demo_default_id - instance = self.model._default_manager.get(public_primary_key=self.demo_default_id) - serializer = self.get_serializer(instance) - headers = self.get_success_headers(serializer.data) - response = Response(data=serializer.data, status=status.HTTP_201_CREATED, headers=headers) - - elif method == "put": - # It excludes a instance update. 
- # It returns the instance with public primary key - # is equal to demo_default_id - instance = self.get_object() - serializer = self.get_serializer(instance) - headers = self.get_success_headers(serializer.data) - response = Response(data=serializer.data, status=status.HTTP_200_OK, headers=headers) - - elif method == "delete": - # In this case we return nothing just success response. - response = Response(status=status.HTTP_204_NO_CONTENT) - - except Exception as exc: - response = self.handle_exception(exc) - - self.response = self.finalize_response(request, response, *args, **kwargs) - return self.response - - return super().dispatch(request, *args, **kwargs) - - class RateLimitHeadersMixin: # This mixin add RateLimit-Reset header to RateLimited response def handle_exception(self, exc): diff --git a/engine/common/utils.py b/engine/common/utils.py index 4b9ef9c1..7507bf97 100644 --- a/engine/common/utils.py +++ b/engine/common/utils.py @@ -177,6 +177,14 @@ def clean_markup(text): return cleaned +def escape_for_twilio_phone_call(text): + # https://www.twilio.com/docs/api/errors/12100 + text = text.replace("&", "&") + text = text.replace(">", ">") + text = text.replace("<", "<") + return text + + def escape_html(text): return html.escape(text) diff --git a/engine/apps/integrations/metadata/configuration/alertmanager.py b/engine/config_integrations/alertmanager.py similarity index 100% rename from engine/apps/integrations/metadata/configuration/alertmanager.py rename to engine/config_integrations/alertmanager.py diff --git a/engine/config_integrations/elastalert.py b/engine/config_integrations/elastalert.py new file mode 100644 index 00000000..90e9bfcc --- /dev/null +++ b/engine/config_integrations/elastalert.py @@ -0,0 +1,66 @@ +# Main +enabled = True +title = "Elastalert" +slug = "elastalert" +short_description = "Elastic" +is_displayed_on_web = True +description = None +is_featured = False +is_able_to_autoresolve = True +is_demo_alert_enabled = True + 
+description = None + +# Default templates +slack_title = """\ +*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} Incident>* via {{ integration_name }} +{% if source_link %} + (*<{{ source_link }}|source>*) +{%- endif %}""" + +slack_message = "```{{ payload|tojson_pretty }}```" + +slack_image_url = None + +web_title = "Incident" + +web_message = """\ +``` +{{ payload|tojson_pretty }} +``` +""" + +web_image_url = slack_image_url + +sms_title = web_title + +phone_call_title = sms_title + +email_title = web_title + +email_message = "{{ payload|tojson_pretty }}" + +telegram_title = sms_title + +telegram_message = "{{ payload|tojson_pretty }}" + +telegram_image_url = slack_image_url + +source_link = None + +grouping_id = '{{ payload.get("alert_uid", "")}}' + +resolve_condition = """\ +{%- if "is_amixr_heartbeat_restored" in payload -%} +{# We don't know the payload format from your integration. #} +{# The heartbeat alerts will go here so we check for our own key #} +{{ payload["is_amixr_heartbeat_restored"] }} +{%- else -%} +{{ payload.get("state", "").upper() == "OK" }} +{%- endif %}""" + +acknowledge_condition = None + +group_verbose_name = "Incident" + +example_payload = {"message": "This alert was sent by user for the demonstration purposes"} diff --git a/engine/apps/integrations/metadata/configuration/formatted_webhook.py b/engine/config_integrations/formatted_webhook.py similarity index 100% rename from engine/apps/integrations/metadata/configuration/formatted_webhook.py rename to engine/config_integrations/formatted_webhook.py diff --git a/engine/apps/integrations/metadata/configuration/grafana.py b/engine/config_integrations/grafana.py similarity index 100% rename from engine/apps/integrations/metadata/configuration/grafana.py rename to engine/config_integrations/grafana.py diff --git a/engine/apps/integrations/metadata/configuration/grafana_alerting.py b/engine/config_integrations/grafana_alerting.py similarity index 100% rename from 
engine/apps/integrations/metadata/configuration/grafana_alerting.py rename to engine/config_integrations/grafana_alerting.py diff --git a/engine/apps/integrations/metadata/configuration/heartbeat.py b/engine/config_integrations/heartbeat.py similarity index 100% rename from engine/apps/integrations/metadata/configuration/heartbeat.py rename to engine/config_integrations/heartbeat.py diff --git a/engine/apps/integrations/metadata/configuration/inbound_email.py b/engine/config_integrations/inbound_email.py similarity index 100% rename from engine/apps/integrations/metadata/configuration/inbound_email.py rename to engine/config_integrations/inbound_email.py diff --git a/engine/config_integrations/kapacitor.py b/engine/config_integrations/kapacitor.py new file mode 100644 index 00000000..d5f013fe --- /dev/null +++ b/engine/config_integrations/kapacitor.py @@ -0,0 +1,65 @@ +# Main +enabled = True +title = "Kapacitor" +slug = "kapacitor" +short_description = "InfluxDB" +description = None +is_displayed_on_web = True +is_featured = False +is_able_to_autoresolve = True +is_demo_alert_enabled = True + +description = None + +# Default templates +slack_title = """\ +*<{{ grafana_oncall_link }}|#{{ grafana_oncall_incident_id }} {{ payload.get("id", "Title undefined (check Slack Title Template)") }}>* via {{ integration_name }} +{% if source_link %} + (*<{{ source_link }}|source>*) +{%- endif %}""" + +slack_message = """\ +```{{ payload|tojson_pretty }}``` +""" + +slack_image_url = None + +web_title = '{{ payload.get("id", "Title undefined (check Web Title Template)") }}' + +web_message = """\ +``` +{{ payload|tojson_pretty }} +``` +""" + +web_image_url = slack_image_url + +sms_title = web_title + +phone_call_title = web_title + +email_title = web_title + +email_message = slack_message + +telegram_title = sms_title + +telegram_message = "{{ payload|tojson_pretty }}" + +telegram_image_url = slack_image_url + +source_link = None + +grouping_id = '{{ payload.get("id", "") }}' + 
+resolve_condition = '{{ payload.get("level", "").startswith("OK") }}' + +acknowledge_condition = None + +group_verbose_name = '{{ payload.get("id", "") }}' + +example_payload = { + "id": "TestAlert", + "message": "This alert was sent by user for the demonstration purposes", + "data": "{foo: bar}", +} diff --git a/engine/apps/integrations/metadata/configuration/maintenance.py b/engine/config_integrations/maintenance.py similarity index 100% rename from engine/apps/integrations/metadata/configuration/maintenance.py rename to engine/config_integrations/maintenance.py diff --git a/engine/apps/integrations/metadata/configuration/manual.py b/engine/config_integrations/manual.py similarity index 100% rename from engine/apps/integrations/metadata/configuration/manual.py rename to engine/config_integrations/manual.py diff --git a/engine/apps/integrations/metadata/configuration/slack_channel.py b/engine/config_integrations/slack_channel.py similarity index 100% rename from engine/apps/integrations/metadata/configuration/slack_channel.py rename to engine/config_integrations/slack_channel.py diff --git a/engine/apps/integrations/metadata/configuration/webhook.py b/engine/config_integrations/webhook.py similarity index 96% rename from engine/apps/integrations/metadata/configuration/webhook.py rename to engine/config_integrations/webhook.py index ea18fab7..113efc56 100644 --- a/engine/apps/integrations/metadata/configuration/webhook.py +++ b/engine/config_integrations/webhook.py @@ -56,7 +56,7 @@ resolve_condition = """\ {# The heartbeat alerts will go here so we check for our own key #} {{ payload["is_amixr_heartbeat_restored"] }} {%- else -%} -{{ payload.get("state", "").upper() == "OK" }}' +{{ payload.get("state", "").upper() == "OK" }} {%- endif %}""" acknowledge_condition = None diff --git a/engine/engine/urls.py b/engine/engine/urls.py index 9e55241a..518c5608 100644 --- a/engine/engine/urls.py +++ b/engine/engine/urls.py @@ -54,7 +54,7 @@ if 
settings.FEATURE_SLACK_INTEGRATION_ENABLED: path("slack/", include("apps.slack.urls")), ] -if settings.OSS_INSTALLATION_FEATURES_ENABLED: +if settings.OSS_INSTALLATION: urlpatterns += [ path("api/internal/v1/", include("apps.oss_installation.urls")), ] diff --git a/engine/requirements.txt b/engine/requirements.txt index a9dfc03d..1aaf78c5 100644 --- a/engine/requirements.txt +++ b/engine/requirements.txt @@ -9,7 +9,6 @@ celery==4.3.0 redis==3.2.0 django-celery-results==1.0.4 humanize==0.5.1 -django-mysql==2.4.1 uwsgi==2.0.20 django-cors-headers==3.7.0 django-debug-toolbar==3.2.1 @@ -39,3 +38,5 @@ django-rest-polymorphic==0.1.9 pre-commit==2.15.0 https://github.com/iskhakov/django-push-notifications/archive/refs/tags/2.0.0-hotfix-4.tar.gz django-mirage-field==1.3.0 +django-mysql==4.6.0 +PyMySQL==1.0.2 diff --git a/engine/scripts/start_all_in_one.sh b/engine/scripts/start_all_in_one.sh deleted file mode 100644 index f4a64e39..00000000 --- a/engine/scripts/start_all_in_one.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -export DJANGO_SETTINGS_MODULE=settings.all_in_one - -generate_value_if_not_exist () -{ - if [ ! -f /etc/app/secret_data/$1 ]; then - touch /etc/app/secret_data/$1 - base64 /dev/urandom | head -c $2 > /etc/app/secret_data/$1 -fi -export $1=$(cat /etc/app/secret_data/$1) -} - -generate_value_if_not_exist SECRET_KEY 75 - -generate_value_if_not_exist MIRAGE_SECRET_KEY 75 -generate_value_if_not_exist MIRAGE_CIPHER_IV 16 - -export BASE_URL=http://localhost:8000 - -echo "Starting redis in the background" -# Redis will dump the changes to the volume every 60 seconds if at least 1 key changed -redis-server --daemonize yes --save 60 1 --dir /etc/app/redis_data/ -echo "Running migrations" -python manage.py migrate - -echo "Start celery" -python manage.py start_celery & - -# Postponing token issuing to make sure it's the last record in the console. 
-bash -c 'sleep 10; python manage.py issue_invite_for_the_frontend --override' & - -echo "Starting server" -python manage.py runserver 0.0.0.0:8000 --noreload diff --git a/engine/settings/all_in_one.py b/engine/settings/all_in_one.py index e2196274..5c90e3e3 100644 --- a/engine/settings/all_in_one.py +++ b/engine/settings/all_in_one.py @@ -1,5 +1,4 @@ import sys -from random import randrange from .prod_without_db import * # noqa @@ -37,22 +36,3 @@ CELERY_BROKER_URL = "redis://localhost:6379/0" if TESTING: TELEGRAM_TOKEN = "0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX" TWILIO_AUTH_TOKEN = "twilio_auth_token" - -# TODO: OSS: Add these setting to oss settings file. Add Version there too. -OSS_INSTALLATION_FEATURES_ENABLED = True - -INSTALLED_APPS += ["apps.oss_installation"] # noqa - -CELERY_BEAT_SCHEDULE["send_usage_stats"] = { # noqa - "task": "apps.oss_installation.tasks.send_usage_stats_report", - "schedule": crontab(hour=0, minute=randrange(0, 59)), # Send stats report at a random minute past midnight # noqa - "args": (), -} # noqa - -CELERY_BEAT_SCHEDULE["send_cloud_heartbeat"] = { # noqa - "task": "apps.oss_installation.tasks.send_cloud_heartbeat", - "schedule": crontab(minute="*/3"), # noqa - "args": (), -} # noqa - -SEND_ANONYMOUS_USAGE_STATS = True diff --git a/engine/settings/base.py b/engine/settings/base.py index b2150a47..16d605ac 100644 --- a/engine/settings/base.py +++ b/engine/settings/base.py @@ -1,12 +1,15 @@ import os -from urllib.parse import urljoin +from random import randrange from celery.schedules import crontab from common.utils import getenv_boolean VERSION = "dev-oss" -SEND_ANONYMOUS_USAGE_STATS = False +# Indicates if instance is OSS installation. +# It is needed to plug-in oss application and urls. 
+OSS_INSTALLATION = getenv_boolean("GRAFANA_ONCALL_OSS_INSTALLATION", True) +SEND_ANONYMOUS_USAGE_STATS = getenv_boolean("SEND_ANONYMOUS_USAGE_STATS", default=True) # License is OpenSource or Cloud OPEN_SOURCE_LICENSE_NAME = "OpenSource" @@ -46,17 +49,18 @@ BASE_URL = os.environ.get("BASE_URL") # Root URL of OnCall backend # Feature toggles FEATURE_LIVE_SETTINGS_ENABLED = getenv_boolean("FEATURE_LIVE_SETTINGS_ENABLED", default=True) -FEATURE_TELEGRAM_INTEGRATION_ENABLED = getenv_boolean("FEATURE_TELEGRAM_INTEGRATION_ENABLED", default=False) +FEATURE_TELEGRAM_INTEGRATION_ENABLED = getenv_boolean("FEATURE_TELEGRAM_INTEGRATION_ENABLED", default=True) FEATURE_EMAIL_INTEGRATION_ENABLED = getenv_boolean("FEATURE_EMAIL_INTEGRATION_ENABLED", default=False) -FEATURE_SLACK_INTEGRATION_ENABLED = getenv_boolean("FEATURE_SLACK_INTEGRATION_ENABLED", default=False) -OSS_INSTALLATION_FEATURES_ENABLED = False +FEATURE_SLACK_INTEGRATION_ENABLED = getenv_boolean("FEATURE_SLACK_INTEGRATION_ENABLED", default=True) +GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED = getenv_boolean("GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED", default=True) +GRAFANA_CLOUD_NOTIFICATIONS_ENABLED = getenv_boolean("GRAFANA_CLOUD_NOTIFICATIONS_ENABLED", default=True) TWILIO_ACCOUNT_SID = os.environ.get("TWILIO_ACCOUNT_SID") TWILIO_AUTH_TOKEN = os.environ.get("TWILIO_AUTH_TOKEN") TWILIO_NUMBER = os.environ.get("TWILIO_NUMBER") TWILIO_VERIFY_SERVICE_SID = os.environ.get("TWILIO_VERIFY_SERVICE_SID") -TELEGRAM_WEBHOOK_URL = os.environ.get("TELEGRAM_WEBHOOK_URL", urljoin(BASE_URL, "/telegram/")) +TELEGRAM_WEBHOOK_HOST = os.environ.get("TELEGRAM_WEBHOOK_HOST", BASE_URL) TELEGRAM_TOKEN = os.environ.get("TELEGRAM_TOKEN") os.environ.setdefault("MYSQL_PASSWORD", "empty") @@ -70,6 +74,10 @@ SENDGRID_FROM_EMAIL = os.environ.get("SENDGRID_FROM_EMAIL") SENDGRID_SECRET_KEY = os.environ.get("SENDGRID_SECRET_KEY") SENDGRID_INBOUND_EMAIL_DOMAIN = os.environ.get("SENDGRID_INBOUND_EMAIL_DOMAIN") +# For Grafana Cloud integration 
+GRAFANA_CLOUD_ONCALL_API_URL = os.environ.get("GRAFANA_CLOUD_ONCALL_API_URL", "https://a-prod-us-central-0.grafana.net") +GRAFANA_CLOUD_ONCALL_TOKEN = os.environ.get("GRAFANA_CLOUD_ONCALL_TOKEN", None) + # Application definition INSTALLED_APPS = [ @@ -409,10 +417,6 @@ SELF_HOSTED_SETTINGS = { "ORG_TITLE": "Self-Hosted Organization", } -GRAFANA_CLOUD_ONCALL_API_URL = os.environ.get("GRAFANA_CLOUD_ONCALL_API_URL", "https://a-prod-us-central-0.grafana.net") -GRAFANA_CLOUD_ONCALL_TOKEN = os.environ.get("GRAFANA_CLOUD_ONCALL_TOKEN", None) -GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED = getenv_boolean("GRAFANA_CLOUD_ONCALL_HEARTBEAT_ENABLED", default=True) - GRAFANA_INCIDENT_STATIC_API_KEY = os.environ.get("GRAFANA_INCIDENT_STATIC_API_KEY", None) DATA_UPLOAD_MAX_MEMORY_SIZE = 5242880 @@ -424,15 +428,39 @@ FEATURE_EXTRA_MESSAGING_BACKENDS_ENABLED = getenv_boolean("FEATURE_EXTRA_MESSAGI EXTRA_MESSAGING_BACKENDS = [] INSTALLED_ONCALL_INTEGRATIONS = [ - "apps.integrations.metadata.configuration.alertmanager", - "apps.integrations.metadata.configuration.grafana", - "apps.integrations.metadata.configuration.grafana_alerting", - "apps.integrations.metadata.configuration.formatted_webhook", - "apps.integrations.metadata.configuration.webhook", - "apps.integrations.metadata.configuration.amazon_sns", - "apps.integrations.metadata.configuration.heartbeat", - "apps.integrations.metadata.configuration.inbound_email", - "apps.integrations.metadata.configuration.maintenance", - "apps.integrations.metadata.configuration.manual", - "apps.integrations.metadata.configuration.slack_channel", + "config_integrations.alertmanager", + "config_integrations.grafana", + "config_integrations.grafana_alerting", + "config_integrations.formatted_webhook", + "config_integrations.webhook", + "config_integrations.kapacitor", + "config_integrations.elastalert", + "config_integrations.heartbeat", + "config_integrations.inbound_email", + "config_integrations.maintenance", + "config_integrations.manual", + 
"config_integrations.slack_channel", ] + +if OSS_INSTALLATION: + INSTALLED_APPS += ["apps.oss_installation"] # noqa + + CELERY_BEAT_SCHEDULE["send_usage_stats"] = { # noqa + "task": "apps.oss_installation.tasks.send_usage_stats_report", + "schedule": crontab( + hour=0, minute=randrange(0, 59) + ), # Send stats report at a random minute past midnight # noqa + "args": (), + } # noqa + + CELERY_BEAT_SCHEDULE["send_cloud_heartbeat"] = { # noqa + "task": "apps.oss_installation.tasks.send_cloud_heartbeat_task", + "schedule": crontab(minute="*/3"), # noqa + "args": (), + } # noqa + + CELERY_BEAT_SCHEDULE["sync_users_with_cloud"] = { # noqa + "task": "apps.oss_installation.tasks.sync_users_with_cloud", + "schedule": crontab(hour="*/12"), # noqa + "args": (), + } # noqa diff --git a/engine/settings/dev.py b/engine/settings/dev.py index aff8ca9d..ef43c7ff 100644 --- a/engine/settings/dev.py +++ b/engine/settings/dev.py @@ -1,6 +1,9 @@ import os import sys +# Workaround to use pymysql instead of mysqlclient +import pymysql + from .base import * # noqa SECRET_KEY = os.environ.get("SECRET_KEY", "osMsNM0PqlRHBlUvqmeJ7+ldU3IUETCrY9TrmiViaSmInBHolr1WUlS0OFS4AHrnnkp1vp9S9z1") @@ -10,11 +13,20 @@ MIRAGE_SECRET_KEY = os.environ.get( ) MIRAGE_CIPHER_IV = os.environ.get("MIRAGE_CIPHER_IV", "tZZa+60zTZO2NRcS") -# Primary database must have the name "default" +pymysql.install_as_MySQLdb() + DATABASES = { "default": { - "ENGINE": "django.db.backends.sqlite3", - "NAME": os.path.join(BASE_DIR, "sqlite_data/db.sqlite3"), # noqa + "ENGINE": "django.db.backends.mysql", + "NAME": os.environ.get("MYSQL_DB_NAME", "oncall_local_dev"), + "USER": os.environ.get("MYSQL_USER", "root"), + "PASSWORD": os.environ.get("MYSQL_PASSWORD"), + "HOST": os.environ.get("MYSQL_HOST", "127.0.0.1"), + "PORT": os.environ.get("MYSQL_PORT", "3306"), + "OPTIONS": { + "charset": "utf8mb4", + "connect_timeout": 1, + }, }, } diff --git a/engine/settings/hobby.py b/engine/settings/hobby.py new file mode 100644 index 
00000000..3bd73c13 --- /dev/null +++ b/engine/settings/hobby.py @@ -0,0 +1,38 @@ +# flake8: noqa: F405 + +from random import randrange + +# Workaround to use pymysql instead of mysqlclient +import pymysql + +from .prod_without_db import * # noqa + +pymysql.install_as_MySQLdb() + +DATABASES = { + "default": { + "ENGINE": "django.db.backends.mysql", + "NAME": os.environ.get("MYSQL_DB_NAME"), + "USER": os.environ.get("MYSQL_USER"), + "PASSWORD": os.environ["MYSQL_PASSWORD"], + "HOST": os.environ.get("MYSQL_HOST"), + "PORT": os.environ.get("MYSQL_PORT"), + "OPTIONS": { + "charset": "utf8mb4", + "connect_timeout": 1, + }, + }, +} + +RABBITMQ_USERNAME = os.environ.get("RABBITMQ_USERNAME") +RABBITMQ_PASSWORD = os.environ.get("RABBITMQ_PASSWORD") +RABBITMQ_HOST = os.environ.get("RABBITMQ_HOST") +RABBITMQ_PORT = os.environ.get("RABBITMQ_PORT") + +CELERY_BROKER_URL = f"amqp://{RABBITMQ_USERNAME}:{RABBITMQ_PASSWORD}@{RABBITMQ_HOST}:{RABBITMQ_PORT}" + +MIRAGE_SECRET_KEY = SECRET_KEY +MIRAGE_CIPHER_IV = "1234567890abcdef" # use default + +APPEND_SLASH = False +SECURE_SSL_REDIRECT = False diff --git a/grafana-plugin/src/GrafanaPluginRootPage.tsx b/grafana-plugin/src/GrafanaPluginRootPage.tsx index a3276a5f..b6f0cc65 100644 --- a/grafana-plugin/src/GrafanaPluginRootPage.tsx +++ b/grafana-plugin/src/GrafanaPluginRootPage.tsx @@ -48,7 +48,7 @@ const RootWithLoader = observer((props: AppRootProps) => { } else if (store.isUserAnonymous) { text = '😞 Unfortunately Grafana OnCall is available for authorized users only, please sign in to proceed.'; } else if (store.retrySync) { - text = `🚫 OnCall took too many tries to synchronize`; + text = `🚫 OnCall took too many tries to synchronize... 
Are background workers up and running?`; } return ( @@ -100,9 +100,11 @@ export const Root = observer((props: AppRootProps) => { const style = document.createElement('style'); document.head.appendChild(style); const index = style.sheet.insertRule('.page-body {max-width: unset !important}'); + const index2 = style.sheet.insertRule('.page-container {max-width: unset !important}'); return () => { style.sheet.removeRule(index); + style.sheet.removeRule(index2); }; }, []); @@ -116,6 +118,7 @@ export const Root = observer((props: AppRootProps) => { meta, grafanaUser: window.grafanaBootData.user, enableLiveSettings: store.hasFeature(AppFeature.LiveSettings), + enableCloudPage: store.hasFeature(AppFeature.CloudConnection), }), [meta, pathWithoutLeadingSlash, page, store.features] ) diff --git a/grafana-plugin/src/components/Policy/NotificationPolicy.tsx b/grafana-plugin/src/components/Policy/NotificationPolicy.tsx index 040f4b43..29254a04 100644 --- a/grafana-plugin/src/components/Policy/NotificationPolicy.tsx +++ b/grafana-plugin/src/components/Policy/NotificationPolicy.tsx @@ -35,7 +35,7 @@ export interface NotificationPolicyProps { waitDelays?: WaitDelay[]; notifyByOptions?: NotifyBy[]; telegramVerified: boolean; - phoneVerified: boolean; + phoneStatus: number; color: string; number: number; userAction: UserAction; @@ -115,13 +115,21 @@ export class NotificationPolicy extends React.ComponentPhone number is verified - ) : ( - Phone number is not verified - ); + switch (phoneStatus) { + case 0: + return Cloud is not synced; + case 1: + return User is not matched with cloud; + case 2: + return Phone number is not verified; + case 3: + return Phone number is verified; + + default: + return null; + } } _renderTelegramNote() { diff --git a/grafana-plugin/src/containers/PersonalNotificationSettings/PersonalNotificationSettings.tsx b/grafana-plugin/src/containers/PersonalNotificationSettings/PersonalNotificationSettings.tsx index 4812b0ae..a9d2205e 100644 --- 
a/grafana-plugin/src/containers/PersonalNotificationSettings/PersonalNotificationSettings.tsx +++ b/grafana-plugin/src/containers/PersonalNotificationSettings/PersonalNotificationSettings.tsx @@ -12,6 +12,7 @@ import Timeline from 'components/Timeline/Timeline'; import { WithPermissionControl } from 'containers/WithPermissionControl/WithPermissionControl'; import { NotificationPolicyType } from 'models/notification_policy'; import { User as UserType } from 'models/user/user.types'; +import { AppFeature } from 'state/features'; import { useStore } from 'state/useStore'; import { UserAction } from 'state/userAction'; @@ -105,6 +106,12 @@ const PersonalNotificationSettings = observer((props: PersonalNotificationSettin const user = userStore.items[userPk]; const userAction = isCurrent ? UserAction.UpdateOwnSettings : UserAction.UpdateNotificationPolicies; + const getPhoneStatus = () => { + if (store.hasFeature(AppFeature.CloudNotifications)) { + return user.cloud_connection_status; + } + return Number(user.verified_phone_number) + 2; + }; return (
@@ -124,7 +131,7 @@ const PersonalNotificationSettings = observer((props: PersonalNotificationSettin index={index} number={index + 1} telegramVerified={Boolean(user.telegram_configuration)} - phoneVerified={Boolean(user && user.verified_phone_number)} + phoneStatus={getPhoneStatus()} slackTeamIdentity={store.teamStore.currentTeam?.slack_team_identity} slackUserIdentity={user.slack_user_identity} data={notificationPolicy} diff --git a/grafana-plugin/src/containers/PluginConfigPage/PluginConfigPage.module.css b/grafana-plugin/src/containers/PluginConfigPage/PluginConfigPage.module.css index 8996189b..d0eafdf8 100644 --- a/grafana-plugin/src/containers/PluginConfigPage/PluginConfigPage.module.css +++ b/grafana-plugin/src/containers/PluginConfigPage/PluginConfigPage.module.css @@ -1,7 +1,8 @@ -.delete_configuration_button { - margin-top: 20px; -} - .command-line { width: 100%; } + +.info-block { + margin-bottom: 24px; + margin-top: 24px; +} diff --git a/grafana-plugin/src/containers/PluginConfigPage/PluginConfigPage.tsx b/grafana-plugin/src/containers/PluginConfigPage/PluginConfigPage.tsx index e3ccfc4c..5ca6d87d 100644 --- a/grafana-plugin/src/containers/PluginConfigPage/PluginConfigPage.tsx +++ b/grafana-plugin/src/containers/PluginConfigPage/PluginConfigPage.tsx @@ -19,6 +19,7 @@ import cn from 'classnames/bind'; import CopyToClipboard from 'react-copy-to-clipboard'; import { OnCallAppSettings } from 'types'; +import Block from 'components/GBlock/Block'; import Text from 'components/Text/Text'; import WithConfirm from 'components/WithConfirm/WithConfirm'; import logo from 'img/logo.svg'; @@ -34,23 +35,20 @@ const cx = cn.bind(styles); interface Props extends PluginConfigPageProps> {} export const PluginConfigPage = (props: Props) => { + const grafanaUrlDefault = getItem('grafanaUrl') || window.location.origin; const { plugin } = props; const [onCallApiUrl, setOnCallApiUrl] = useState(getItem('onCallApiUrl')); const [onCallInvitationToken, setOnCallInvitationToken] = 
useState(); - const [grafanaUrl, setGrafanaUrl] = useState(window.location.origin); + const [grafanaUrl, setGrafanaUrl] = useState(grafanaUrlDefault); const [pluginConfigLoading, setPluginConfigLoading] = useState(true); const [pluginStatusOk, setPluginStatusOk] = useState(); const [pluginStatusMessage, setPluginStatusMessage] = useState(); const [isSelfHostedInstall, setIsSelfHostedInstall] = useState(true); const [retrySync, setRetrySync] = useState(false); - const [showConfirmationModal, setShowConfirmationModal] = useState(false); - const configurePlugin = () => { - setShowConfirmationModal(true); - }; const setupPlugin = useCallback(async () => { setItem('onCallApiUrl', onCallApiUrl); - setShowConfirmationModal(false); + setItem('grafanaUrl', grafanaUrl); await getBackendSrv().post(`/api/plugins/grafana-oncall-app/settings`, { enabled: true, pinned: true, @@ -189,7 +187,9 @@ export const PluginConfigPage = (props: Props) => { if (counter >= 5) { clearInterval(interval); - setPluginStatusMessage(`OnCall took too many tries to synchronize.`); + setPluginStatusMessage( + `OnCall took too many tries to synchronize. Did you launch Celery workers? Background workers should perform synchronization, not web server.` + ); setRetrySync(true); setPluginStatusOk(false); setPluginConfigLoading(false); @@ -212,85 +212,70 @@ export const PluginConfigPage = (props: Props) => { Configure Grafana OnCall {pluginStatusOk && (

- Configuration was sucessfully created. Now you can find Grafana OnCall on right toolbar.{' '} + Plugin and the backend are connected! Check Grafana OnCall 👈👈👈{' '} Grafana OnCall Logo

)} +

{'Plugin <-> backend connection status'}

+
+            {pluginStatusMessage}
+          
- {isSelfHostedInstall ? ( -
-

{'Plugin <-> backend connection status'}

+ + {/*

{'Plugin <-> backend connection status'}

                 {pluginStatusMessage}
-              
+ */} + {retrySync && ( + + )} + {isSelfHostedInstall ? ( - -
- ) : ( - - )} + ) : ( + + )}{' '} + ) : ( Configure Grafana OnCall

This page will help you to connect OnCall backend and OnCall Grafana plugin 👋

-

1. Grafana OnCall is a Grafana plugin and backend. Run backend

+ +

1. Launch backend

- Run production backend using{' '} - - this instructions at our GitHub + Run hobby, dev or production backend:{' '} + + getting started. - , - - Or run the local one: -
-              
-                 {
-                    openNotification('Grafana OnCall command copied');
-                  }}
-                >
-                  
-                {' '}
-                docker build -t grafana/amixr-all-in-one -f Dockerfile.all-in-one .
-              
-            
- - - Need help? -
- 1. Talk to the developers in the #grafana-oncall channel at{' '} - - Slack - -
- 2. Search for issues or create a new one in the{' '} - - GitHub - -
- - } - /> + + + Need help? +
- Talk to the OnCall team in the #grafana-oncall channel at{' '} + + Slack + +
- Ask questions at{' '} + + GitHub Discussions + {' '} + or file bugs at{' '} + + GitHub Issues + +
+

2. Conect the backend and the plugin

-

{'Plugin <-> backend connection status'}

+

{'Plugin <-> backend connection status:'}

             {pluginStatusMessage}
           
@@ -301,7 +286,7 @@ Seek for such a line: “Your invite token: <> , use it in the Graf > <> - + How to re-issue the invite token? @@ -311,42 +296,29 @@ Seek for such a line: “Your invite token: <> , use it in the Graf + It should be rechable from Grafana. Possible options:
+ http://host.docker.internal:8000 (if you run backend in the docker locally) +
+ http://localhost:8000
+ ... + + } >
- + - {/* */} - {/* */} - {showConfirmationModal && ( - setShowConfirmationModal(false)} - > - - - - - - )}
)}
diff --git a/grafana-plugin/src/containers/UserSettings/UserSettings.tsx b/grafana-plugin/src/containers/UserSettings/UserSettings.tsx index ca00871b..20abb1f2 100644 --- a/grafana-plugin/src/containers/UserSettings/UserSettings.tsx +++ b/grafana-plugin/src/containers/UserSettings/UserSettings.tsx @@ -58,7 +58,8 @@ const UserSettings = observer((props: UserFormProps) => { setActiveTab(tab); }, []); - const isModalWide = activeTab === UserSettingsTab.UserInfo && isDesktopOrLaptop; + const isModalWide = + (activeTab === UserSettingsTab.UserInfo && isDesktopOrLaptop) || activeTab === UserSettingsTab.PhoneVerification; const [showNotificationSettingsTab, showSlackConnectionTab, showTelegramConnectionTab, showMobileAppVerificationTab] = [ diff --git a/grafana-plugin/src/containers/UserSettings/parts/connectors/PhoneConnector.tsx b/grafana-plugin/src/containers/UserSettings/parts/connectors/PhoneConnector.tsx index b00cf868..05ec4509 100644 --- a/grafana-plugin/src/containers/UserSettings/parts/connectors/PhoneConnector.tsx +++ b/grafana-plugin/src/containers/UserSettings/parts/connectors/PhoneConnector.tsx @@ -1,11 +1,12 @@ import React, { useCallback } from 'react'; -import { Button, Label } from '@grafana/ui'; +import { Button, Label, VerticalGroup } from '@grafana/ui'; import cn from 'classnames/bind'; import Text from 'components/Text/Text'; import { UserSettingsTab } from 'containers/UserSettings/UserSettings.types'; import { User } from 'models/user/user.types'; +import { AppFeature } from 'state/features'; import { useStore } from 'state/useStore'; import styles from './index.module.css'; @@ -29,31 +30,85 @@ const PhoneConnector = (props: PhoneConnectorProps) => { onTabChange(UserSettingsTab.PhoneVerification); }, [storeUser?.unverified_phone_number]); + const cloudVersionPhone = (user: User) => { + switch (user.cloud_connection_status) { + case 0: + return Cloud is not synced; + + case 1: + return ( + + User is not matched with cloud + + + ); + + case 2: + 
return ( + + Phone number is not verified in Grafana Cloud + + + ); + case 3: + return ( + + Phone number verified + + + ); + default: + return ( + + User is not matched with cloud + + + ); + } + }; + return (
- - {storeUser.verified_phone_number || '—'} - {storeUser.verified_phone_number ? ( -
- Phone number is verified - -
- ) : storeUser.unverified_phone_number ? ( -
- Phone number is not verified - -
+ {store.hasFeature(AppFeature.CloudNotifications) ? ( + <> + + {cloudVersionPhone(storeUser)} + ) : ( -
- Phone number is not added - -
+ <> + + {storeUser.verified_phone_number || '—'} + {storeUser.verified_phone_number ? ( +
+ Phone number is verified + +
+ ) : storeUser.unverified_phone_number ? ( +
+ Phone number is not verified + +
+ ) : ( +
+ Phone number is not added + +
+ )} + )}
); diff --git a/grafana-plugin/src/containers/UserSettings/parts/connectors/index.module.css b/grafana-plugin/src/containers/UserSettings/parts/connectors/index.module.css index 0e32b304..04f4550e 100644 --- a/grafana-plugin/src/containers/UserSettings/parts/connectors/index.module.css +++ b/grafana-plugin/src/containers/UserSettings/parts/connectors/index.module.css @@ -30,3 +30,7 @@ .warning-icon { color: var(--warning-text-color); } + +.error-message { + color: var(--error-text-color); +} diff --git a/grafana-plugin/src/containers/UserSettings/parts/index.tsx b/grafana-plugin/src/containers/UserSettings/parts/index.tsx index 19e598ed..82e57fd6 100644 --- a/grafana-plugin/src/containers/UserSettings/parts/index.tsx +++ b/grafana-plugin/src/containers/UserSettings/parts/index.tsx @@ -1,17 +1,20 @@ -import React, { useCallback } from 'react'; +import React, { useCallback, useEffect } from 'react'; import { Tab, TabContent, TabsBar } from '@grafana/ui'; import cn from 'classnames/bind'; +import { observer } from 'mobx-react'; import Block from 'components/GBlock/Block'; import MobileAppVerification from 'containers/MobileAppVerification/MobileAppVerification'; import { UserSettingsTab } from 'containers/UserSettings/UserSettings.types'; import { SlackTab } from 'containers/UserSettings/parts/tabs//SlackTab/SlackTab'; +import CloudPhoneSettings from 'containers/UserSettings/parts/tabs/CloudPhoneSettings/CloudPhoneSettings'; import { NotificationSettingsTab } from 'containers/UserSettings/parts/tabs/NotificationSettingsTab'; import PhoneVerification from 'containers/UserSettings/parts/tabs/PhoneVerification/PhoneVerification'; import TelegramInfo from 'containers/UserSettings/parts/tabs/TelegramInfo/TelegramInfo'; import { UserInfoTab } from 'containers/UserSettings/parts/tabs/UserInfoTab/UserInfoTab'; import { User } from 'models/user/user.types'; +import { AppFeature } from 'state/features'; import { useStore } from 'state/useStore'; import styles from 
'containers/UserSettings/parts/index.module.css'; @@ -100,8 +103,11 @@ interface TabsContentProps { isDesktopOrLaptop: boolean; } -export const TabsContent = (props: TabsContentProps) => { +export const TabsContent = observer((props: TabsContentProps) => { const { id, activeTab, onTabChange, isDesktopOrLaptop } = props; + useEffect(() => { + store.updateFeatures(); + }, []); const store = useStore(); const { userStore } = store; @@ -124,9 +130,12 @@ export const TabsContent = (props: TabsContentProps) => { ))} {activeTab === UserSettingsTab.NotificationSettings && } - {activeTab === UserSettingsTab.PhoneVerification && ( - - )} + {activeTab === UserSettingsTab.PhoneVerification && + (store.hasFeature(AppFeature.CloudNotifications) ? ( + + ) : ( + + ))} {activeTab === UserSettingsTab.MobileAppVerification && ( )} @@ -134,4 +143,4 @@ export const TabsContent = (props: TabsContentProps) => { {activeTab === UserSettingsTab.TelegramInfo && } ); -}; +}); diff --git a/grafana-plugin/src/containers/UserSettings/parts/tabs/CloudPhoneSettings/CloudPhoneSettings.module.css b/grafana-plugin/src/containers/UserSettings/parts/tabs/CloudPhoneSettings/CloudPhoneSettings.module.css new file mode 100644 index 00000000..ab86c434 --- /dev/null +++ b/grafana-plugin/src/containers/UserSettings/parts/tabs/CloudPhoneSettings/CloudPhoneSettings.module.css @@ -0,0 +1,3 @@ +.test { + color: grey; +} diff --git a/grafana-plugin/src/containers/UserSettings/parts/tabs/CloudPhoneSettings/CloudPhoneSettings.tsx b/grafana-plugin/src/containers/UserSettings/parts/tabs/CloudPhoneSettings/CloudPhoneSettings.tsx new file mode 100644 index 00000000..724b4712 --- /dev/null +++ b/grafana-plugin/src/containers/UserSettings/parts/tabs/CloudPhoneSettings/CloudPhoneSettings.tsx @@ -0,0 +1,168 @@ +import React, { useCallback, useEffect, useState } from 'react'; + +import { getLocationSrv, LocationUpdate } from '@grafana/runtime'; +import { + Field, + Input, + Button, + Modal, + HorizontalGroup, + Alert, + 
Icon, + VerticalGroup, + Table, + LoadingPlaceholder, +} from '@grafana/ui'; +import cn from 'classnames/bind'; +import { observer } from 'mobx-react'; + +import Block from 'components/GBlock/Block'; +import GTable from 'components/GTable/GTable'; +import PluginLink from 'components/PluginLink/PluginLink'; +import Text from 'components/Text/Text'; +import WithConfirm from 'components/WithConfirm/WithConfirm'; +import { User } from 'models/user/user.types'; +import { AppFeature } from 'state/features'; +import { WithStoreProps } from 'state/types'; +import { useStore } from 'state/useStore'; +import { UserAction } from 'state/userAction'; +import { withMobXProviderContext } from 'state/withStore'; + +import styles from './CloudPhoneSettings.module.css'; + +const cx = cn.bind(styles); + +interface CloudPhoneSettingsProps extends WithStoreProps { + userPk?: User['pk']; +} + +const CloudPhoneSettings = observer((props: CloudPhoneSettingsProps) => { + const { userPk } = props; + const store = useStore(); + const [syncing, setSyncing] = useState(false); + const [userStatus, setUserStatus] = useState(0); + const [userLink, setUserLink] = useState(null); + + useEffect(() => { + getCloudUserInfo(); + }, []); + + const handleLinkClick = (link: string) => { + window.location.replace(link); + }; + + const syncUser = async () => { + setSyncing(true); + await store.cloudStore.syncCloudUser(userPk); + const cloudUser = await store.cloudStore.getCloudUser(userPk); + setUserStatus(cloudUser?.cloud_data?.status); + setUserLink(cloudUser?.cloud_data?.link); + setSyncing(false); + }; + + const getCloudUserInfo = async () => { + const cloudUser = await store.cloudStore.getCloudUser(userPk); + setUserStatus(cloudUser?.cloud_data?.status); + setUserLink(cloudUser?.cloud_data?.link); + }; + + const UserCloudStatus = () => { + switch (userStatus) { + case 0: + if (store.hasFeature(AppFeature.CloudNotifications)) { + return ( + + Your account successfully matched, but Cloud is not 
connected. + + + + + ); + } + return ( + + Grafana Cloud is not synced + + ); + case 1: + return ( + + + { + 'We can’t find a matching account in the connected Grafana Cloud instance (matching happens by e-mail). ' + } + + + + ); + case 2: + return ( + + + Your account successfully matched with the Grafana Cloud account. Please verify your phone number.{' '} + + + + ); + case 3: + return ( + + + Your account successfully matched with the Grafana Cloud account. Your phone number is verified.{' '} + + + + ); + default: + return ( + + + { + 'We can’t find a matching account in the connected Grafana Cloud instance (matching happens by e-mail). ' + } + + + + ); + } + }; + + return ( + <> + {store.isUserActionAllowed(UserAction.UpdateOtherUsersSettings) ? ( + + + OnCall use Grafana Cloud for SMS and phone call notifications + {syncing ? ( + + ) : ( + + )} + + {!syncing ? : } + + ) : ( + + OnCall use Grafana Cloud for SMS and phone call notifications + You do not have permission to perform this action. Ask an admin to upgrade your permissions. 
+ + )} + + ); +}); + +export default withMobXProviderContext(CloudPhoneSettings); diff --git a/grafana-plugin/src/icons/cross-circled.svg b/grafana-plugin/src/icons/cross-circled.svg new file mode 100644 index 00000000..f468d638 --- /dev/null +++ b/grafana-plugin/src/icons/cross-circled.svg @@ -0,0 +1,8 @@ + + + diff --git a/grafana-plugin/src/icons/heart-line.svg b/grafana-plugin/src/icons/heart-line.svg new file mode 100644 index 00000000..6c063e81 --- /dev/null +++ b/grafana-plugin/src/icons/heart-line.svg @@ -0,0 +1,24 @@ + + + + + + + diff --git a/grafana-plugin/src/icons/index.tsx b/grafana-plugin/src/icons/index.tsx index fc1b0d3a..7b77d8f6 100644 --- a/grafana-plugin/src/icons/index.tsx +++ b/grafana-plugin/src/icons/index.tsx @@ -168,6 +168,42 @@ export const HeartRedIcon = (props: IconProps) => ( ); +export const HeartIcon = (props: IconProps) => ( + + + + + +); + +export const CrossCircleIcon = (props: IconProps) => ( + + + +); + export const GrafanaIcon = (props: IconProps) => ( - + - + diff --git a/grafana-plugin/src/index.css b/grafana-plugin/src/index.css index 93b9dfe1..eeeff2de 100644 --- a/grafana-plugin/src/index.css +++ b/grafana-plugin/src/index.css @@ -30,13 +30,13 @@ background: var(--highlighted-row-bg); } -@media (max-width: 1440px) { +@media (max-width: 1540px) { .page-header__tabs > ul > li > a > div { display: none; } } -@media (max-width: 1200px) { +@media (max-width: 1300px) { .sidemenu { position: fixed !important; height: 100%; diff --git a/grafana-plugin/src/models/alertgroup/alertgroup.ts b/grafana-plugin/src/models/alertgroup/alertgroup.ts index 684948a1..ba284236 100644 --- a/grafana-plugin/src/models/alertgroup/alertgroup.ts +++ b/grafana-plugin/src/models/alertgroup/alertgroup.ts @@ -68,7 +68,7 @@ export class AlertGroupStore extends BaseStore { constructor(rootStore: RootStore) { super(rootStore); - this.path = '/alertgroups1/'; + this.path = '/alertgroups/'; } async attachAlert(pk: Alert['pk'], rootPk: Alert['pk']) { diff 
--git a/grafana-plugin/src/models/base_store.ts b/grafana-plugin/src/models/base_store.ts index 9af0c5d4..ab46fe3c 100644 --- a/grafana-plugin/src/models/base_store.ts +++ b/grafana-plugin/src/models/base_store.ts @@ -52,10 +52,11 @@ export default class BaseStore { } @action - async update(id: any, data: any) { + async update(id: any, data: any, params: any = null) { const result = await makeRequest(`${this.path}${id}/`, { method: 'PUT', data, + params: params, }).catch(this.onApiError); // Update env_status field for current team diff --git a/grafana-plugin/src/models/cloud/cloud.ts b/grafana-plugin/src/models/cloud/cloud.ts new file mode 100644 index 00000000..fa19125a --- /dev/null +++ b/grafana-plugin/src/models/cloud/cloud.ts @@ -0,0 +1,83 @@ +import { get } from 'lodash-es'; +import { action, computed, observable } from 'mobx'; + +import BaseStore from 'models/base_store'; +import { NotificationPolicyType } from 'models/notification_policy'; +import { User } from 'models/user/user.types'; +import { makeRequest } from 'network'; +import { Mixpanel } from 'services/mixpanel'; +import { RootStore } from 'state'; +import { move } from 'state/helpers'; + +import { Cloud } from './cloud.types'; + +export class CloudStore extends BaseStore { + @observable.shallow + searchResult: { matched_users_count?: number; results?: Array } = {}; + + @observable.shallow + items: { [id: string]: Cloud } = {}; + + constructor(rootStore: RootStore) { + super(rootStore); + + this.path = '/cloud_users/'; + } + + @action + async updateItems(page = 1) { + const { matched_users_count, results } = await makeRequest(this.path, { + params: { page }, + }); + + this.items = { + ...this.items, + ...results.reduce( + (acc: { [key: number]: Cloud }, item: Cloud) => ({ + ...acc, + [item.id]: item, + }), + {} + ), + }; + + this.searchResult = { + matched_users_count, + results: results.map((item: Cloud) => item.id), + }; + } + + getSearchResult() { + return { + matched_users_count: 
this.searchResult.matched_users_count, + results: this.searchResult.results && this.searchResult.results.map((id: Cloud['id']) => this.items?.[id]), + }; + } + + async syncCloudUsers() { + return await makeRequest(`${this.path}`, { method: 'POST' }); + } + + async syncCloudUser(id: string) { + return await makeRequest(`${this.path}${id}/sync/`, { method: 'POST' }); + } + + async getCloudHeartbeat() { + return await makeRequest(`/cloud_heartbeat/`, { method: 'POST' }).catch((error) => { + console.log(error); + }); + } + + async getCloudUser(id: string) { + return await makeRequest(`${this.path}${id}`, { method: 'GET' }); + } + + async getCloudConnectionStatus() { + return await makeRequest(`/cloud_connection/`, { method: 'GET' }); + } + + @action + async disconnectToCloud() { + return await makeRequest(`/cloud_connection/`, { method: 'DELETE' }); + } +} diff --git a/grafana-plugin/src/models/cloud/cloud.types.ts b/grafana-plugin/src/models/cloud/cloud.types.ts new file mode 100644 index 00000000..15658b3d --- /dev/null +++ b/grafana-plugin/src/models/cloud/cloud.types.ts @@ -0,0 +1,9 @@ +export interface Cloud { + id: string; + username: string; + email: string; + cloud_data?: { + status?: number; + link?: string; + }; +} diff --git a/grafana-plugin/src/models/global_setting/global_setting.ts b/grafana-plugin/src/models/global_setting/global_setting.ts index a7e6deb0..edcb2986 100644 --- a/grafana-plugin/src/models/global_setting/global_setting.ts +++ b/grafana-plugin/src/models/global_setting/global_setting.ts @@ -60,4 +60,9 @@ export class GlobalSettingStore extends BaseStore { return this.searchResult[query].map((globalSettingId: GlobalSetting['id']) => this.items[globalSettingId]); } + + async getGlobalSettingItemByName(name: string) { + const results = await this.getAll(); + return results.find((element: { name: string }) => element.name === name); + } } diff --git a/grafana-plugin/src/models/user/user.types.ts b/grafana-plugin/src/models/user/user.types.ts 
index cb4e03bf..4f1ba2ed 100644 --- a/grafana-plugin/src/models/user/user.types.ts +++ b/grafana-plugin/src/models/user/user.types.ts @@ -50,4 +50,7 @@ export interface User { permissions: UserAction[]; trigger_video_call?: boolean; export_url?: string; + status?: number; + link?: string; + cloud_connection_status?: number; } diff --git a/grafana-plugin/src/pages/cloud/CloudPage.module.css b/grafana-plugin/src/pages/cloud/CloudPage.module.css new file mode 100644 index 00000000..416d2a70 --- /dev/null +++ b/grafana-plugin/src/pages/cloud/CloudPage.module.css @@ -0,0 +1,67 @@ +.info-block { + width: 70%; + min-width: 1100px; + padding: 24px; +} + +.warning-message { + color: var(--warning-text-color); +} + +.success-message { + color: var(--success-text-color); +} + +.error-message { + color: var(--error-text-color); +} + +.user-table { + margin-top: 24px; + width: 100%; +} + +.user-row { + height: 32px; +} + +.cloud-page-title, +.heartbit-button { + margin-top: 24px; +} + +.cloud-oncall-name { + color: #f55f3e; +} + +.block-icon { + color: var(--secondary-text-color); +} + +.error-icon { + display: inline-block; + white-space: break-spaces; + line-height: 20px; + color: var(--error-text-color); +} + +.error-icon svg { + vertical-align: middle; +} + +.heart-icon { + color: var(--secondary-text-color); + margin-right: 8px; +} + +.block-button { + margin-top: 24px; +} + +.table-title { + margin-bottom: 16px; +} + +.table-button { + float: right; +} diff --git a/grafana-plugin/src/pages/cloud/CloudPage.tsx b/grafana-plugin/src/pages/cloud/CloudPage.tsx new file mode 100644 index 00000000..d81ce0c9 --- /dev/null +++ b/grafana-plugin/src/pages/cloud/CloudPage.tsx @@ -0,0 +1,412 @@ +import React, { useCallback, useEffect, useState } from 'react'; + +import { getLocationSrv, LocationUpdate } from '@grafana/runtime'; +import { + Field, + Input, + Button, + Modal, + HorizontalGroup, + Alert, + Icon, + VerticalGroup, + Table, + LoadingPlaceholder, +} from '@grafana/ui'; 
+import cn from 'classnames/bind'; +import { observer } from 'mobx-react'; + +import Block from 'components/GBlock/Block'; +import GTable from 'components/GTable/GTable'; +import PluginLink from 'components/PluginLink/PluginLink'; +import Text from 'components/Text/Text'; +import WithConfirm from 'components/WithConfirm/WithConfirm'; +import { CrossCircleIcon, HeartIcon } from 'icons'; +import { Cloud } from 'models/cloud/cloud.types'; +import { WithStoreProps } from 'state/types'; +import { useStore } from 'state/useStore'; +import { withMobXProviderContext } from 'state/withStore'; +import { openErrorNotification } from 'utils'; + +import styles from './CloudPage.module.css'; + +const cx = cn.bind(styles); + +interface CloudPageProps extends WithStoreProps {} +const ITEMS_PER_PAGE = 50; + +const CloudPage = observer((props: CloudPageProps) => { + const store = useStore(); + const [page, setPage] = useState(1); + const [cloudApiKey, setCloudApiKey] = useState(''); + const [apiKeyError, setApiKeyError] = useState(false); + const [cloudIsConnected, setCloudIsConnected] = useState(undefined); + const [cloudNotificationsEnabled, setCloudNotificationsEnabled] = useState(false); + const [heartbeatLink, setheartbeatLink] = useState(null); + const [heartbeatEnabled, setheartbeatEnabled] = useState(false); + const [showConfirmationModal, setShowConfirmationModal] = useState(false); + const [syncingUsers, setSyncingUsers] = useState(false); + + useEffect(() => { + store.cloudStore.updateItems(page); + store.cloudStore.getCloudConnectionStatus().then((cloudStatus) => { + setCloudIsConnected(cloudStatus.cloud_connection_status); + setheartbeatEnabled(cloudStatus.cloud_heartbeat_enabled); + setheartbeatLink(cloudStatus.cloud_heartbeat_link); + setCloudNotificationsEnabled(cloudStatus.cloud_notifications_enabled); + }); + }, [cloudIsConnected]); + + const { matched_users_count, results } = store.cloudStore.getSearchResult(); + + const handleChangePage = (page: number) => { + 
setPage(page); + store.cloudStore.updateItems(page); + }; + + const handleChangeCloudApiKey = useCallback((e) => { + setCloudApiKey(e.target.value); + setApiKeyError(false); + }, []); + + const saveKeyAndConnect = () => { + setShowConfirmationModal(true); + }; + + const disconnectCloudOncall = () => { + setCloudIsConnected(false); + store.cloudStore.disconnectToCloud(); + }; + + const connectToCloud = async () => { + setShowConfirmationModal(false); + const globalSettingItem = await store.globalSettingStore.getGlobalSettingItemByName('GRAFANA_CLOUD_ONCALL_TOKEN'); + store.globalSettingStore + .update(globalSettingItem?.id, { name: 'GRAFANA_CLOUD_ONCALL_TOKEN', value: cloudApiKey }, { sync_users: false }) + .then(async (response) => { + if (response.error) { + setCloudIsConnected(false); + setApiKeyError(true); + openErrorNotification(response.error); + } else { + setCloudIsConnected(true); + syncUsers(); + const heartbeatData: { link: string } = await store.cloudStore.getCloudHeartbeat(); + setheartbeatLink(heartbeatData?.link); + } + }); + }; + + const syncUsers = async () => { + setSyncingUsers(true); + await store.cloudStore.syncCloudUsers(); + await store.cloudStore.updateItems(); + setSyncingUsers(false); + }; + + const handleLinkClick = (link: string) => { + window.location.replace(link); + }; + + const renderButtons = (user: Cloud) => { + switch (user?.cloud_data?.status) { + case 0: + return null; + case 1: + return null; + case 2: + return ( + + ); + case 3: + return ( + + ); + default: + return null; + } + }; + + const renderStatus = (user: Cloud) => { + switch (user?.cloud_data?.status) { + case 0: + return Grafana Cloud is not synced; + case 1: + return User not found in Grafana Cloud; + case 2: + return Phone number is not verified in Grafana Cloud; + case 3: + return Phone number verified; + + default: + return User not found in Grafana Cloud; + } + }; + + const renderStatusIcon = (user: Cloud) => { + switch (user?.cloud_data?.status) { + case 0: + 
return ( +
+ +
+ ); + case 1: + return ( +
+ +
+ ); + + case 2: + return ; + case 3: + return ; + default: + return ( +
+ +
+ ); + } + }; + + const renderEmail = (user: Cloud) => { + return {user.email}; + }; + + const columns = [ + { + width: '2%', + render: renderStatusIcon, + key: 'statusIcon', + }, + { + width: '28%', + render: renderEmail, + key: 'email', + }, + { + width: '50%', + render: renderStatus, + key: 'status', + }, + { + width: '20%', + render: renderButtons, + key: 'buttons', + align: 'actions', + }, + ]; + + const ConnectedBlock = ( + + + + + Cloud OnCall API key + + Cloud OnCall is sucessfully connected. + + + + + + + + + + + + + Monitor cloud instance with heartbeat + + + Once connected, current OnCall instance will send heartbeats every 3 minutes to the cloud Instance. If no + heartbeat will be received in 10 minutes, cloud instance will issue an alert. + +
+ {heartbeatEnabled ? ( + heartbeatLink ? ( + + ) : ( + Heartbeat will be created in a moment automatically + ) + ) : ( + Heartbeat is not enabled. You can go to the Env Variables tab and enable it + )} +
+
+
+ + {cloudNotificationsEnabled ? ( + + + SMS and phone call notifications + + +
+ + { + 'Ask your users to sign up in Grafana Cloud, verify phone number and feel free to set up SMS & phone call notificaitons in personal settings! Only users with Admin or Editor role will be synced.' + } + + + ( +
+ + + {matched_users_count ? matched_users_count : 0} user + {matched_users_count === 1 ? '' : 's'} + {` matched between OSS and Cloud OnCall`} + + {syncingUsers ? ( + + ) : ( + + )} + +
+ )} + rowKey="id" + // @ts-ignore + columns={columns} + data={results} + pagination={{ + page, + total: Math.ceil((matched_users_count || 0) / ITEMS_PER_PAGE), + onChange: handleChangePage, + }} + /> +
+
+ ) : ( + + + SMS and phone call notifications + + + {'Please enable Grafana cloud notification to be able to see list of cloud users'} + + + )} +
+
+ ); + + const DisconnectedBlock = ( + + + + + Cloud OnCall API key + + + + + + + + + + + + + + Monitor cloud instance with heartbeat + + + Once connected, current OnCall instance will send heartbeats every 3 minutes to the cloud Instance. If no + heartbeat will be received in 10 minutes, cloud instance will issue an alert. + + + + + + + SMS and phone call notifications + + + Users matched between OSS and Cloud OnCall currently unavailable. + + + + ); + + return ( +
+ + + Connect Open Source OnCall and Cloud OnCall + + {cloudIsConnected === undefined ? ( + + ) : cloudIsConnected ? ( + ConnectedBlock + ) : ( + DisconnectedBlock + )} + + {showConfirmationModal && ( + setShowConfirmationModal(false)} + > + + + + + + )} + +
+ ); +}); + +export default withMobXProviderContext(CloudPage); diff --git a/grafana-plugin/src/pages/incidents/Incidents.tsx b/grafana-plugin/src/pages/incidents/Incidents.tsx index 9ba1dd71..6e8f3f72 100644 --- a/grafana-plugin/src/pages/incidents/Incidents.tsx +++ b/grafana-plugin/src/pages/incidents/Incidents.tsx @@ -302,12 +302,11 @@ class Incidents extends React.Component (results && results.some((alert: AlertType) => alert.undoAction)) || Object.keys(affectedRows).length ); - console.log('results', results); return (
{this.renderBulkActions()} a { + color: var(--primary-text-link); +} diff --git a/grafana-plugin/src/pages/livesettings/LiveSettingsPage.tsx b/grafana-plugin/src/pages/livesettings/LiveSettingsPage.tsx index 71e8b6aa..af261122 100644 --- a/grafana-plugin/src/pages/livesettings/LiveSettingsPage.tsx +++ b/grafana-plugin/src/pages/livesettings/LiveSettingsPage.tsx @@ -197,6 +197,7 @@ class LiveSettings extends React.Component dangerouslySetInnerHTML={{ __html: item.description, }} + className={cx('description-style')} /> ); }; diff --git a/grafana-plugin/src/pages/users/Users.module.css b/grafana-plugin/src/pages/users/Users.module.css index c9922e8c..021460b0 100644 --- a/grafana-plugin/src/pages/users/Users.module.css +++ b/grafana-plugin/src/pages/users/Users.module.css @@ -50,3 +50,22 @@ margin-right: 8px; color: var(--warning-text-color); } + +.error-icon { + display: inline-block; + white-space: break-spaces; + line-height: 20px; + color: var(--error-text-color); +} + +.error-icon svg { + vertical-align: middle; +} + +.warning-message { + color: var(--warning-text-color); +} + +.success-message { + color: var(--success-text-color); +} diff --git a/grafana-plugin/src/pages/users/Users.tsx b/grafana-plugin/src/pages/users/Users.tsx index 67a0bf59..87abf9cd 100644 --- a/grafana-plugin/src/pages/users/Users.tsx +++ b/grafana-plugin/src/pages/users/Users.tsx @@ -14,8 +14,10 @@ import Text from 'components/Text/Text'; import UsersFilters from 'components/UsersFilters/UsersFilters'; import UserSettings from 'containers/UserSettings/UserSettings'; import { WithPermissionControl } from 'containers/WithPermissionControl/WithPermissionControl'; +import { CrossCircleIcon } from 'icons'; import { getRole } from 'models/user/user.helpers'; import { User, User as UserType, UserRole } from 'models/user/user.types'; +import { AppFeature } from 'state/features'; import { WithStoreProps } from 'state/types'; import { UserAction } from 'state/userAction'; import { 
withMobXProviderContext } from 'state/withStore'; @@ -290,10 +292,37 @@ class Users extends React.Component { }; renderNote = (user: UserType) => { - if (!user.verified_phone_number || !user.slack_user_identity) { + const { store } = this.props; + let phone_verified; + let phone_verified_message; + if (store.hasFeature(AppFeature.CloudNotifications)) { + // If cloud notifications is enabled show message about its status, not local phone verification. + switch (user.cloud_connection_status) { + case 0: + phone_verified = false; + phone_verified_message = 'Cloud is not synced'; + break; + case 1: + phone_verified = false; + phone_verified_message = 'User not matched with cloud'; + break; + case 2: + phone_verified = false; + phone_verified_message = 'Phone number is not verified in Grafana Cloud'; + break; + case 3: + phone_verified = true; + phone_verified_message = 'Phone number is verified in Grafana Cloud'; + break; + } + } else { + phone_verified = user.verified_phone_number; + phone_verified_message = 'Phone not verified'; + } + if (!phone_verified || !user.slack_user_identity || !user.telegram_configuration) { let texts = []; - if (!user.verified_phone_number) { - texts.push('Phone not verified'); + if (!phone_verified) { + texts.push(phone_verified_message); } if (!user.slack_user_identity) { texts.push('Slack not verified'); diff --git a/grafana-plugin/src/state/features.ts index 8363575c..bf915f19 100644 --- a/grafana-plugin/src/state/features.ts +++ b/grafana-plugin/src/state/features.ts @@ -3,4 +3,6 @@ export enum AppFeature { Telegram = 'telegram', LiveSettings = 'live_settings', MobileApp = 'mobile_app', + CloudNotifications = 'grafana_cloud_notifications', + CloudConnection = 'grafana_cloud_connection', } diff --git a/grafana-plugin/src/state/rootBaseStore.ts index 5900ab1f..331f6ca1 100644 --- a/grafana-plugin/src/state/rootBaseStore.ts +++ 
b/grafana-plugin/src/state/rootBaseStore.ts @@ -9,6 +9,7 @@ import { AlertReceiveChannel } from 'models/alert_receive_channel/alert_receive_ import { AlertReceiveChannelFiltersStore } from 'models/alert_receive_channel_filters/alert_receive_channel_filters'; import { AlertGroupStore } from 'models/alertgroup/alertgroup'; import { ApiTokenStore } from 'models/api_token/api_token'; +import { CloudStore } from 'models/cloud/cloud'; import { EscalationChainStore } from 'models/escalation_chain/escalation_chain'; import { EscalationPolicyStore } from 'models/escalation_policy/escalation_policy'; import { GlobalSettingStore } from 'models/global_setting/global_setting'; @@ -81,6 +82,7 @@ export class RootBaseStore { // -------------------------- userStore: UserStore = new UserStore(this); + cloudStore: CloudStore = new CloudStore(this); grafanaTeamStore: GrafanaTeamStore = new GrafanaTeamStore(this); alertReceiveChannelStore: AlertReceiveChannelStore = new AlertReceiveChannelStore(this); outgoingWebhookStore: OutgoingWebhookStore = new OutgoingWebhookStore(this); diff --git a/grafana-plugin/src/utils/consts.ts b/grafana-plugin/src/utils/consts.ts index 7546075e..c5e77b2a 100644 --- a/grafana-plugin/src/utils/consts.ts +++ b/grafana-plugin/src/utils/consts.ts @@ -1,4 +1,4 @@ import plugin from '../../package.json'; // eslint-disable-line export const APP_TITLE = 'Grafana OnCall'; -export const APP_SUBTITLE = `Incident Response powered by Amixr (${plugin?.version})`; +export const APP_SUBTITLE = `Incident Response (${plugin?.version})`; diff --git a/grafana-plugin/src/utils/hooks.ts b/grafana-plugin/src/utils/hooks.ts index bb456564..b26ff31e 100644 --- a/grafana-plugin/src/utils/hooks.ts +++ b/grafana-plugin/src/utils/hooks.ts @@ -16,6 +16,7 @@ type Args = { orgRole: 'Viewer' | 'Editor' | 'Admin'; }; enableLiveSettings: boolean; + enableCloudPage: boolean; }; export function useForceUpdate() { @@ -23,7 +24,7 @@ export function useForceUpdate() { return () => 
setValue((value) => value + 1); } -export function useNavModel({ meta, pages, path, page, grafanaUser, enableLiveSettings }: Args) { +export function useNavModel({ meta, pages, path, page, grafanaUser, enableLiveSettings, enableCloudPage }: Args) { return useMemo(() => { const tabs: NavModelItem[] = []; @@ -36,7 +37,8 @@ export function useNavModel({ meta, pages, path, page, grafanaUser, enableLiveSe hideFromTabs: hideFromTabs || (role === 'Admin' && grafanaUser.orgRole !== role) || - (id === 'live-settings' && !enableLiveSettings), + (id === 'live-settings' && !enableLiveSettings) || + (id === 'cloud' && !enableCloudPage), }); if (page === id) { @@ -61,7 +63,7 @@ export function useNavModel({ meta, pages, path, page, grafanaUser, enableLiveSe node, main: node, }; - }, [meta.info.logos.large, pages, path, page, enableLiveSettings]); + }, [meta.info.logos.large, pages, path, page, enableLiveSettings, enableCloudPage]); } export function usePrevious(value: any) { diff --git a/grafana-plugin/src/vars.css b/grafana-plugin/src/vars.css index a0af933b..0216e04c 100644 --- a/grafana-plugin/src/vars.css +++ b/grafana-plugin/src/vars.css @@ -22,6 +22,8 @@ --secondary-text-color: rgba(36, 41, 46, 0.75); --disabled-text-color: rgba(36, 41, 46, 0.5); --warning-text-color: #8a6c00; + --success-text-color: rgb(10, 118, 78); + --error-text-color: rgb(207, 14, 91); --primary-text-link: #1f62e0; --timeline-icon-background: rgba(70, 76, 84, 0); --timeline-icon-background-resolution-note: rgba(50, 116, 217, 0); @@ -38,6 +40,8 @@ --secondary-text-color: rgba(204, 204, 220, 0.65); --disabled-text-color: rgba(204, 204, 220, 0.4); --warning-text-color: #f8d06b; + --success-text-color: rgb(108, 207, 142); + --error-text-color: rgb(255, 82, 134); --primary-text-link: #6e9fff; --timeline-icon-background: rgba(70, 76, 84, 1); --timeline-icon-background-resolution-note: rgba(50, 116, 217, 1);