Add task to delete empty deleted integrations from the database (#3941)
# What this PR does Add task which will clean up deleted integrations from the database if they don't have any alert groups. This is to help address an issue where queries are slowing down due to having a large number of ids that do not contribute to the result. This will be connected as part of sync organization task once it tests out ok. ## Which issue(s) this PR fixes ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required)
This commit is contained in:
parent
9c0bc26680
commit
2c7ebcf87c
4 changed files with 93 additions and 1 deletions
|
|
@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
## Unreleased
|
||||
|
||||
### Added
|
||||
|
||||
- Add manually run task to clean up unused integrations @mderynck ([#3941](https://github.com/grafana/oncall/pull/3941))
|
||||
|
||||
### Changed
|
||||
|
||||
- Change plugin build to use new packages instead of deprecated grafana-toolkit @maskin25 ([#3837](https://github.com/grafana/oncall/pull/3837))
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ from celery.utils.log import get_task_logger
|
|||
from django.conf import settings
|
||||
from django.utils import timezone
|
||||
|
||||
from apps.alerts.models import AlertReceiveChannel
|
||||
from apps.grafana_plugin.helpers import GcomAPIClient
|
||||
from apps.grafana_plugin.helpers.client import GrafanaAPIClient
|
||||
from apps.grafana_plugin.helpers.gcom import get_active_instance_ids, get_deleted_instance_ids, get_stack_regions
|
||||
|
|
@ -139,3 +140,26 @@ def sync_team_members_for_organization_async(organization_pk):
|
|||
|
||||
grafana_api_client = GrafanaAPIClient(api_url=organization.grafana_url, api_token=organization.api_token)
|
||||
sync_team_members(grafana_api_client, organization)
|
||||
|
||||
|
||||
@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), max_retries=1)
def cleanup_empty_deleted_integrations(organization_pk, dry_run=True):
    """Hard-delete soft-deleted integrations that have no alert groups.

    Meant to be triggered manually per organization. With dry_run=True (the
    default) nothing is removed; the candidates are only logged so the run
    can be inspected before a destructive pass.
    """
    try:
        organization = Organization.objects.get(pk=organization_pk)
    except Organization.DoesNotExist:
        logger.info(f"Organization {organization_pk} was not found")
        return

    # Include soft-deleted rows; alert_groups=None keeps only integrations
    # with no related alert groups, and distinct() guards against duplicates
    # from the join.
    candidates = AlertReceiveChannel.objects_with_deleted.filter(
        organization=organization, deleted_at__isnull=False, alert_groups=None
    ).distinct()
    # len() evaluates the queryset once; the cached results feed the loop below.
    logger.info(
        f"Found count={len(candidates)} integrations in org={organization.public_primary_key} that are both empty and deleted"
    )

    for channel in candidates:
        logger.info(
            f"Deleting integration ppk={channel.public_primary_key} in organization={organization.stack_slug} dry_run={dry_run}"
        )
        if not dry_run:
            channel.hard_delete()
|
||||
|
|
|
|||
|
|
@ -5,7 +5,8 @@ from django.conf import settings
|
|||
from django.test.utils import override_settings
|
||||
from django.utils import timezone
|
||||
|
||||
from apps.grafana_plugin.tasks.sync import run_organization_sync
|
||||
from apps.alerts.models import AlertReceiveChannel
|
||||
from apps.grafana_plugin.tasks.sync import cleanup_empty_deleted_integrations, run_organization_sync
|
||||
|
||||
|
||||
class SyncOrganization(object):
|
||||
|
|
@ -100,3 +101,65 @@ def test_sync_organization_skip_cloud(
|
|||
assert test_client.called and not syncer.called and not syncer.org
|
||||
syncer.reset()
|
||||
test_client.reset()
|
||||
|
||||
|
||||
def create_test_integrations_for_cleanup(make_organization, make_alert_receive_channel, make_alert_group):
    """Build one org with four integrations covering every (deleted, empty) combination.

    Returns a 5-tuple: (org, active-with-groups, active-empty,
    deleted-with-groups, deleted-empty). Only the last one is eligible for
    cleanup.
    """
    org = make_organization()

    with_groups = make_alert_receive_channel(organization=org)
    empty = make_alert_receive_channel(organization=org)
    deleted_with_groups = make_alert_receive_channel(organization=org)
    deleted_empty = make_alert_receive_channel(organization=org)

    # Attach alert groups to two channels, then soft-delete the last two.
    make_alert_group(alert_receive_channel=with_groups)
    make_alert_group(alert_receive_channel=deleted_with_groups)
    deleted_with_groups.delete()
    deleted_empty.delete()

    return org, with_groups, empty, deleted_with_groups, deleted_empty
|
||||
|
||||
|
||||
@pytest.mark.django_db
@pytest.mark.parametrize(
    "dry_run, channel1_exists, channel2_exists, channel3_exists, channel4_exists",
    [
        (True, True, True, True, True),
        (False, True, True, True, False),
    ],
)
def test_cleanup_empty_deleted_integrations_test_run(
    make_organization,
    make_alert_receive_channel,
    make_alert_group,
    dry_run,
    channel1_exists,
    channel2_exists,
    channel3_exists,
    channel4_exists,
):
    """Cleanup removes only the deleted+empty integration of the target org,
    and only when dry_run is off; a second (control) org is never touched."""
    target = create_test_integrations_for_cleanup(make_organization, make_alert_receive_channel, make_alert_group)
    control = create_test_integrations_for_cleanup(make_organization, make_alert_receive_channel, make_alert_group)
    target_org, target_channels = target[0], target[1:]
    control_org, control_channels = control[0], control[1:]

    assert AlertReceiveChannel.objects_with_deleted.filter(organization=target_org).count() == 4
    assert AlertReceiveChannel.objects_with_deleted.filter(organization=control_org).count() == 4

    cleanup_empty_deleted_integrations(target_org.pk, dry_run)

    def _still_exists(channel):
        return AlertReceiveChannel.objects_with_deleted.filter(pk=channel.pk).exists()

    expected = (channel1_exists, channel2_exists, channel3_exists, channel4_exists)
    for channel, should_exist in zip(target_channels, expected):
        assert _still_exists(channel) == should_exist

    # The control org should always be unaffected
    for channel in control_channels:
        assert _still_exists(channel)
|
||||
|
|
|
|||
|
|
@ -134,6 +134,7 @@ CELERY_TASK_ROUTES = {
|
|||
"apps.alerts.tasks.check_escalation_finished.check_alert_group_personal_notifications_task": {"queue": "long"},
|
||||
"apps.alerts.tasks.check_escalation_finished.check_personal_notifications_task": {"queue": "long"},
|
||||
"apps.grafana_plugin.tasks.sync.cleanup_organization_async": {"queue": "long"},
|
||||
"apps.grafana_plugin.tasks.sync.cleanup_empty_deleted_integrations": {"queue": "long"},
|
||||
"apps.grafana_plugin.tasks.sync.start_cleanup_deleted_organizations": {"queue": "long"},
|
||||
"apps.grafana_plugin.tasks.sync.start_sync_organizations": {"queue": "long"},
|
||||
"apps.grafana_plugin.tasks.sync.sync_organization_async": {"queue": "long"},
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue