add get_cached_oncall_users_for_multiple_schedules method + add DELETE /alertgroups/<id> internal API endpoint (#3280)
# What this PR does
This PR:
- adds a new method
`apps.schedules.ical_utils.get_cached_oncall_users_for_multiple_schedules`.
In short, this method adds a layer of caching on top of
`apps.schedules.ical_utils.get_oncall_users_for_multiple_schedules`. We
store one cache value for each schedule. Cache results are stored for 15
minutes, which feels like a good balance between improving
performance and limiting how stale the returned results can be. Cache values are stored as:
- key = `f"schedule_{schedule.public_primary_key}_oncall_users"`
- value = `[user.public_primary_key for user in
schedule.currently_oncall_users]`
- adds a `DELETE /alertgroups/<id>` internal API endpoint (needed by
Grafana Incident for the Add Responders integration)
- updates the `is_currently_oncall` query parameter for the `GET /users`
internal API endpoint to return ALL users when the query param value `==
"all"`
## Which issue(s) this PR fixes
https://github.com/grafana/oncall-private/issues/2264
## Checklist
- [x] Unit, integration, and e2e (if applicable) tests updated
- [x] Documentation added (or `pr:no public docs` PR label added if not
required)
- [x] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not
required)
This commit is contained in:
parent
20f949df8f
commit
eb0f465970
11 changed files with 294 additions and 30 deletions
|
|
@ -12,7 +12,7 @@ from apps.alerts.models import (
|
|||
UserHasNotification,
|
||||
)
|
||||
from apps.alerts.tasks.notify_user import notify_user_task
|
||||
from apps.schedules.ical_utils import get_oncall_users_for_multiple_schedules
|
||||
from apps.schedules.ical_utils import get_cached_oncall_users_for_multiple_schedules
|
||||
from apps.schedules.models import OnCallSchedule
|
||||
from apps.user_management.models import Organization, Team, User
|
||||
|
||||
|
|
@ -195,5 +195,7 @@ def unpage_user(alert_group: AlertGroup, user: User, from_user: User) -> None:
|
|||
|
||||
|
||||
def user_is_oncall(user: User) -> bool:
    """Return True if ``user`` is currently on-call on any schedule related to them.

    Uses the cached on-call lookup (15 minute TTL), so the answer may lag the
    live schedule state by up to the cache TTL.
    """
    schedules_with_oncall_users = get_cached_oncall_users_for_multiple_schedules(
        OnCallSchedule.objects.related_to_user(user)
    )
    # iterate .values() directly (schedule keys are unused) and avoid shadowing
    # the `user` argument inside the comprehension
    oncall_user_pks = {
        oncall_user.pk for oncall_users in schedules_with_oncall_users.values() for oncall_user in oncall_users
    }
    return user.pk in oncall_user_pks
|
||||
|
|
|
|||
|
|
@ -2098,3 +2098,26 @@ def test_wipe_clears_cache(
|
|||
for field_name in AlertFieldsCacheSerializerMixin.ALL_FIELD_NAMES
|
||||
]
|
||||
assert not any([cache.get(key) for key in alert_cache_keys])
|
||||
|
||||
|
||||
@patch("apps.api.views.alert_group.delete_alert_group.apply_async")
@pytest.mark.django_db
def test_delete(mock_delete_alert_group, make_user_auth_headers, alert_group_internal_api_setup):
    """DELETE /alertgroups/<id> enqueues the async deletion task and returns 204.

    Also verifies that an unknown alert group public primary key yields a 404.
    """
    client = APIClient()
    user, token, alert_groups = alert_group_internal_api_setup
    resolved_alert_group, acked_alert_group, new_alert_group, _ = alert_groups

    auth_headers = make_user_auth_headers(user, token)

    # deletion should behave the same regardless of alert group state
    # (resolved / acknowledged / new)
    for alert_group in [resolved_alert_group, acked_alert_group, new_alert_group]:
        mock_delete_alert_group.reset_mock()

        url = reverse("api-internal:alertgroup-detail", kwargs={"pk": alert_group.public_primary_key})
        response = client.delete(url, **auth_headers)

        assert response.status_code == status.HTTP_204_NO_CONTENT
        # the view only schedules the deletion; the celery task performs it
        mock_delete_alert_group.assert_called_once_with((alert_group.pk, user.pk))

    # an unknown public primary key should 404, not enqueue anything
    url = reverse("api-internal:alertgroup-detail", kwargs={"pk": "potato"})
    response = client.delete(url, **auth_headers)
    assert response.status_code == status.HTTP_404_NOT_FOUND
|
||||
|
|
|
|||
|
|
@ -1943,7 +1943,7 @@ def test_users_is_currently_oncall_attribute_works_properly(
|
|||
user2.public_primary_key: False,
|
||||
}
|
||||
|
||||
for user in response.json()["results"]:
|
||||
for user in response.json():
|
||||
assert user["teams"] == []
|
||||
assert user["is_currently_oncall"] == oncall_statuses[user["pk"]]
|
||||
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ from rest_framework.response import Response
|
|||
from apps.alerts.constants import ActionSource
|
||||
from apps.alerts.models import Alert, AlertGroup, AlertReceiveChannel, EscalationChain, ResolutionNote
|
||||
from apps.alerts.paging import unpage_user
|
||||
from apps.alerts.tasks import send_update_resolution_note_signal
|
||||
from apps.alerts.tasks import delete_alert_group, send_update_resolution_note_signal
|
||||
from apps.api.errors import AlertGroupAPIError
|
||||
from apps.api.permissions import RBACPermission
|
||||
from apps.api.serializers.alert_group import AlertGroupListSerializer, AlertGroupSerializer
|
||||
|
|
@ -273,6 +273,7 @@ class AlertGroupView(
|
|||
PublicPrimaryKeyMixin,
|
||||
mixins.RetrieveModelMixin,
|
||||
mixins.ListModelMixin,
|
||||
mixins.DestroyModelMixin,
|
||||
viewsets.GenericViewSet,
|
||||
):
|
||||
authentication_classes = (
|
||||
|
|
@ -305,7 +306,7 @@ class AlertGroupView(
|
|||
"preview_template": [RBACPermission.Permissions.INTEGRATIONS_TEST],
|
||||
}
|
||||
|
||||
http_method_names = ["get", "post"]
|
||||
http_method_names = ["get", "post", "delete"]
|
||||
|
||||
serializer_class = AlertGroupSerializer
|
||||
|
||||
|
|
@ -469,6 +470,11 @@ class AlertGroupView(
|
|||
|
||||
return alert_groups
|
||||
|
||||
    def destroy(self, request, *args, **kwargs):
        """Schedule asynchronous deletion of the alert group and return 204 immediately.

        The actual deletion is performed by the `delete_alert_group` task; the
        HTTP response does not wait for it to complete.
        """
        instance = self.get_object()
        delete_alert_group.apply_async((instance.pk, request.user.pk))
        return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
@extend_schema(responses=inline_serializer(name="AlertGroupStats", fields={"count": serializers.IntegerField()}))
|
||||
@action(detail=False)
|
||||
def stats(self, *args, **kwargs):
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ from apps.api.permissions import RBACPermission
|
|||
from apps.api.serializers.team import TeamLongSerializer, TeamSerializer
|
||||
from apps.auth_token.auth import PluginAuthentication
|
||||
from apps.mobile_app.auth import MobileAppAuthTokenAuthentication
|
||||
from apps.schedules.ical_utils import get_oncall_users_for_multiple_schedules
|
||||
from apps.schedules.ical_utils import get_cached_oncall_users_for_multiple_schedules
|
||||
from apps.user_management.models import Team
|
||||
from common.api_helpers.mixins import PublicPrimaryKeyMixin
|
||||
|
||||
|
|
@ -49,7 +49,7 @@ class TeamViewSet(
|
|||
"""
|
||||
team_ids = [t.id for t in self.filter_queryset(self.get_queryset())]
|
||||
team_schedules = self.request.user.organization.oncall_schedules.filter(team__id__in=team_ids)
|
||||
return get_oncall_users_for_multiple_schedules(team_schedules)
|
||||
return get_cached_oncall_users_for_multiple_schedules(team_schedules)
|
||||
|
||||
def get_serializer_context(self):
|
||||
context = super().get_serializer_context()
|
||||
|
|
|
|||
|
|
@ -58,7 +58,7 @@ from apps.phone_notifications.exceptions import (
|
|||
ProviderNotSupports,
|
||||
)
|
||||
from apps.phone_notifications.phone_backend import PhoneBackend
|
||||
from apps.schedules.ical_utils import get_oncall_users_for_multiple_schedules
|
||||
from apps.schedules.ical_utils import get_cached_oncall_users_for_multiple_schedules
|
||||
from apps.schedules.models import OnCallSchedule
|
||||
from apps.telegram.client import TelegramClient
|
||||
from apps.telegram.models import TelegramVerificationCode
|
||||
|
|
@ -232,7 +232,7 @@ class UserView(
|
|||
The result of this method is cached and is reused for the whole lifetime of a request,
|
||||
since self.get_serializer_context() is called multiple times for every instance in the queryset.
|
||||
"""
|
||||
return get_oncall_users_for_multiple_schedules(self.request.user.organization.oncall_schedules.all())
|
||||
return get_cached_oncall_users_for_multiple_schedules(self.request.user.organization.oncall_schedules.all())
|
||||
|
||||
    def _get_is_currently_oncall_query_param(self) -> str:
        """Return the `is_currently_oncall` query param lowercased ("" when absent).

        Recognized values are "true", "false" and "all"; any other value is
        treated as "no filtering".
        """
        return self.request.query_params.get("is_currently_oncall", "").lower()
|
||||
|
|
@ -290,33 +290,35 @@ class UserView(
|
|||
def _get_oncall_user_ids():
|
||||
return {user.pk for _, users in self.schedules_with_oncall_users.items() for user in users}
|
||||
|
||||
paginate_results = True
|
||||
|
||||
if (is_currently_oncall_query_param := self._get_is_currently_oncall_query_param()) == "true":
|
||||
# client explicitly wants to filter out users that are on-call
|
||||
queryset = queryset.filter(pk__in=_get_oncall_user_ids())
|
||||
elif is_currently_oncall_query_param == "false":
|
||||
# user explicitly wants to filter out on-call users
|
||||
queryset = queryset.exclude(pk__in=_get_oncall_user_ids())
|
||||
elif is_currently_oncall_query_param == "all":
|
||||
# return all users, don't paginate
|
||||
paginate_results = False
|
||||
|
||||
page = self.paginate_queryset(queryset)
|
||||
context = self.get_serializer_context()
|
||||
|
||||
if page is not None:
|
||||
context = self.get_serializer_context()
|
||||
if paginate_results and (page := self.paginate_queryset(queryset)) is not None:
|
||||
if settings.IS_OPEN_SOURCE and live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED:
|
||||
from apps.oss_installation.models import CloudConnector, CloudUserIdentity
|
||||
|
||||
if settings.IS_OPEN_SOURCE:
|
||||
if live_settings.GRAFANA_CLOUD_NOTIFICATIONS_ENABLED:
|
||||
from apps.oss_installation.models import CloudConnector, CloudUserIdentity
|
||||
if (connector := CloudConnector.objects.first()) is not None:
|
||||
emails = list(queryset.values_list("email", flat=True))
|
||||
cloud_identities = list(CloudUserIdentity.objects.filter(email__in=emails))
|
||||
cloud_identities = {cloud_identity.email: cloud_identity for cloud_identity in cloud_identities}
|
||||
context["cloud_identities"] = cloud_identities
|
||||
context["connector"] = connector
|
||||
|
||||
connector = CloudConnector.objects.first()
|
||||
if connector is not None:
|
||||
emails = list(queryset.values_list("email", flat=True))
|
||||
cloud_identities = list(CloudUserIdentity.objects.filter(email__in=emails))
|
||||
cloud_identities = {cloud_identity.email: cloud_identity for cloud_identity in cloud_identities}
|
||||
context["cloud_identities"] = cloud_identities
|
||||
context["connector"] = connector
|
||||
serializer = self.get_serializer(page, many=True, context=context)
|
||||
return self.get_paginated_response(serializer.data)
|
||||
|
||||
serializer = self.get_serializer(queryset, many=True)
|
||||
serializer = self.get_serializer(queryset, many=True, context=context)
|
||||
return Response(serializer.data)
|
||||
|
||||
def retrieve(self, request, *args, **kwargs) -> Response:
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ from typing import TYPE_CHECKING
|
|||
|
||||
import pytz
|
||||
import requests
|
||||
from django.core.cache import cache
|
||||
from django.db.models import Q
|
||||
from icalendar import Calendar
|
||||
from icalendar import Event as IcalEvent
|
||||
|
|
@ -385,6 +386,100 @@ def get_oncall_users_for_multiple_schedules(
|
|||
return oncall_users
|
||||
|
||||
|
||||
def get_cached_oncall_users_for_multiple_schedules(schedules: typing.List["OnCallSchedule"]) -> SchedulesOnCallUsers:
    """
    More "performant" version of `apps.schedules.ical_utils.get_oncall_users_for_multiple_schedules`
    which caches results, for 15 minutes.

    The cache results are stored in the following format:
    - `schedule_<schedule_public_primary_key>_oncall_users`: [list of oncall user public_primary_keys for the schedule]

    This method will return the cached version of the results, if they exist, otherwise it will calculate
    the results and cache them for 15 minutes.

    Note: since we cannot cache Python objects we will cache the primary keys of schedules/users and
    then fetch these from the db in two queries (one for users, one for schedules).
    """
    from apps.schedules.models import OnCallSchedule
    from apps.user_management.models import User

    CACHE_KEY_PREFIX = "schedule_"
    CACHE_KEY_SUFFIX = "_oncall_users"
    CACHE_TTL = 15 * 60  # 15 minutes in seconds

    def _generate_cache_key_for_schedule_oncall_users(schedule: "OnCallSchedule") -> str:
        return f"{CACHE_KEY_PREFIX}{schedule.public_primary_key}{CACHE_KEY_SUFFIX}"

    def _get_schedule_public_primary_key_from_schedule_oncall_users_cache_key(cache_key: str) -> str:
        # strip the exact prefix/suffix by position; str.replace would remove these
        # substrings anywhere they happened to occur inside the public primary key itself
        return cache_key[len(CACHE_KEY_PREFIX) : -len(CACHE_KEY_SUFFIX)]

    cache_keys: typing.List[str] = [_generate_cache_key_for_schedule_oncall_users(schedule) for schedule in schedules]

    # get_many returns a dictionary with all the keys we asked for that actually exist
    # in the cache (and haven't expired)
    cached_results = cache.get_many(cache_keys)

    schedule_public_primary_keys_to_fetch_from_db: typing.Set[str] = set()
    user_public_primary_keys_to_fetch_from_db: typing.Set[str] = set()

    # for the results returned from the cache we need to determine which schedule and user objects
    # we will need to fetch from the database (since we can't cache python objects and are only caching
    # the objects' primary keys)
    for cache_key, cached_oncall_user_public_primary_keys in cached_results.items():
        schedule_public_primary_keys_to_fetch_from_db.add(
            _get_schedule_public_primary_key_from_schedule_oncall_users_cache_key(cache_key)
        )
        user_public_primary_keys_to_fetch_from_db.update(cached_oncall_user_public_primary_keys)

    # calculate results for schedules that weren't in the cache.
    # membership tests go against the dict/set directly (O(1) per lookup) rather
    # than against intermediate lists
    schedule_primary_keys_we_need_to_calculate_results_for = {
        _get_schedule_public_primary_key_from_schedule_oncall_users_cache_key(cache_key)
        for cache_key in cache_keys
        if cache_key not in cached_results
    }

    schedules_we_need_to_calculate_results_for = [
        schedule
        for schedule in schedules
        if schedule.public_primary_key in schedule_primary_keys_we_need_to_calculate_results_for
    ]

    results = get_oncall_users_for_multiple_schedules(schedules_we_need_to_calculate_results_for)

    # update the cache with the new results we just got back
    new_results_to_update_in_cache: typing.Dict[str, typing.List[str]] = {
        _generate_cache_key_for_schedule_oncall_users(schedule): [user.public_primary_key for user in oncall_users]
        for schedule, oncall_users in results.items()
    }
    cache.set_many(new_results_to_update_in_cache, timeout=CACHE_TTL)

    # make two queries to the database, one to fetch the schedule objects we need and the other to fetch
    # the user objects we need (local names avoid shadowing the `schedules` parameter)
    schedule_objects = {
        schedule.public_primary_key: schedule
        for schedule in OnCallSchedule.objects.filter(
            public_primary_key__in=schedule_public_primary_keys_to_fetch_from_db
        )
    }
    user_objects = {
        user.public_primary_key: user
        for user in User.objects.filter(public_primary_key__in=user_public_primary_keys_to_fetch_from_db)
    }

    # revisit our cached_results and this time populate the results with the actual objects
    for cache_key, cached_oncall_user_public_primary_keys in cached_results.items():
        schedule_public_primary_key = _get_schedule_public_primary_key_from_schedule_oncall_users_cache_key(cache_key)
        results[schedule_objects[schedule_public_primary_key]] = [
            user_objects[user_public_primary_key]
            for user_public_primary_key in cached_oncall_user_public_primary_keys
        ]

    return results
|
||||
|
||||
|
||||
def parse_username_from_string(string: str) -> str:
|
||||
"""
|
||||
Parse on-call shift user from the given string
|
||||
|
|
|
|||
|
|
@ -1,15 +1,19 @@
|
|||
import datetime
|
||||
import textwrap
|
||||
from unittest.mock import patch
|
||||
from uuid import uuid4
|
||||
|
||||
import icalendar
|
||||
import pytest
|
||||
import pytz
|
||||
from django.core.cache import cache
|
||||
from django.utils import timezone
|
||||
|
||||
from apps.api.permissions import LegacyAccessControlRole
|
||||
from apps.schedules.ical_utils import (
|
||||
get_cached_oncall_users_for_multiple_schedules,
|
||||
get_icalendar_tz_or_utc,
|
||||
get_oncall_users_for_multiple_schedules,
|
||||
is_icals_equal,
|
||||
list_of_oncall_shifts_from_ical,
|
||||
list_users_to_notify_from_ical,
|
||||
|
|
@ -499,3 +503,129 @@ def test_is_icals_equal_compare_events_not_equal():
|
|||
"""
|
||||
)
|
||||
assert not is_icals_equal(with_vtimezone, without_vtimezone)
|
||||
|
||||
|
||||
@pytest.mark.django_db
def test_get_cached_oncall_users_for_multiple_schedules(
    make_organization,
    make_user_for_organization,
    make_schedule,
    make_on_call_shift,
):
    """Exercise the three cache states: cold cache, partially warm, and fully warm."""
    today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)

    def _make_schedule_with_oncall_users(organization, oncall_users):
        # build a web schedule with a single 24h daily rotation that covers "now",
        # so `oncall_users` are guaranteed to be on-call during the test
        schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)

        shift_start_time = today - timezone.timedelta(hours=1)

        on_call_shift = make_on_call_shift(
            organization=organization,
            shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT,
            start=shift_start_time,
            rotation_start=shift_start_time,
            duration=timezone.timedelta(seconds=(24 * 60 * 60)),
            priority_level=1,
            frequency=CustomOnCallShift.FREQUENCY_DAILY,
            schedule=schedule,
        )
        on_call_shift.add_rolling_users([oncall_users])
        return schedule

    def _test_setup():
        # three schedules, each with a distinct pair of on-call users
        organization = make_organization()
        users = [make_user_for_organization(organization) for _ in range(6)]
        schedule1 = _make_schedule_with_oncall_users(organization, users[:2])
        schedule2 = _make_schedule_with_oncall_users(organization, users[2:4])
        schedule3 = _make_schedule_with_oncall_users(organization, users[4:])

        return users, (schedule1, schedule2, schedule3)

    def _generate_cache_key(schedule):
        # must match the key format used by get_cached_oncall_users_for_multiple_schedules
        return f"schedule_{schedule.public_primary_key}_oncall_users"

    # scenario: nothing is cached, need to recalculate everything and cache it
    users, schedules = _test_setup()
    schedule1, schedule2, schedule3 = schedules

    cache.clear()

    results = get_cached_oncall_users_for_multiple_schedules(schedules)

    assert results == {
        schedule1: [users[0], users[1]],
        schedule2: [users[2], users[3]],
        schedule3: [users[4], users[5]],
    }

    cached_data = cache.get_many([_generate_cache_key(s) for s in schedules])

    assert cached_data == {
        _generate_cache_key(schedule1): [users[0].public_primary_key, users[1].public_primary_key],
        _generate_cache_key(schedule2): [users[2].public_primary_key, users[3].public_primary_key],
        _generate_cache_key(schedule3): [users[4].public_primary_key, users[5].public_primary_key],
    }

    # scenario: schedule1 is cached, need to recalculate schedule2 and schedule3 and cache them
    users, schedules = _test_setup()
    schedule1, schedule2, schedule3 = schedules

    cache.clear()
    cache.set(_generate_cache_key(schedule1), [users[0].public_primary_key, users[1].public_primary_key])

    with patch(
        "apps.schedules.ical_utils.get_oncall_users_for_multiple_schedules",
        wraps=get_oncall_users_for_multiple_schedules,
    ) as spy_get_oncall_users_for_multiple_schedules:
        results = get_cached_oncall_users_for_multiple_schedules(schedules)

    # make sure we're only calling the actual method for the uncached schedules
    spy_get_oncall_users_for_multiple_schedules.assert_called_once_with([schedule2, schedule3])

    assert results == {
        schedule1: [users[0], users[1]],
        schedule2: [users[2], users[3]],
        schedule3: [users[4], users[5]],
    }

    cached_data = cache.get_many([_generate_cache_key(s) for s in schedules])
    assert cached_data == {
        _generate_cache_key(schedule1): [users[0].public_primary_key, users[1].public_primary_key],
        _generate_cache_key(schedule2): [users[2].public_primary_key, users[3].public_primary_key],
        _generate_cache_key(schedule3): [users[4].public_primary_key, users[5].public_primary_key],
    }

    # scenario: everything is already cached
    users, schedules = _test_setup()
    schedule1, schedule2, schedule3 = schedules

    cache.clear()
    cache.set_many(
        {
            _generate_cache_key(schedule1): [users[0].public_primary_key, users[1].public_primary_key],
            _generate_cache_key(schedule2): [users[2].public_primary_key, users[3].public_primary_key],
            _generate_cache_key(schedule3): [users[4].public_primary_key, users[5].public_primary_key],
        }
    )

    with patch(
        "apps.schedules.ical_utils.get_oncall_users_for_multiple_schedules",
        wraps=get_oncall_users_for_multiple_schedules,
    ) as spy_get_oncall_users_for_multiple_schedules:
        results = get_cached_oncall_users_for_multiple_schedules(schedules)

    # make sure we're not recalculating results because everything is already cached
    spy_get_oncall_users_for_multiple_schedules.assert_called_once_with([])

    assert results == {
        schedule1: [users[0], users[1]],
        schedule2: [users[2], users[3]],
        schedule3: [users[4], users[5]],
    }

    cached_data = cache.get_many([_generate_cache_key(s) for s in schedules])
    assert cached_data == {
        _generate_cache_key(schedule1): [users[0].public_primary_key, users[1].public_primary_key],
        _generate_cache_key(schedule2): [users[2].public_primary_key, users[3].public_primary_key],
        _generate_cache_key(schedule3): [users[4].public_primary_key, users[5].public_primary_key],
    }
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ from rest_framework.response import Response
|
|||
|
||||
from apps.alerts.models import AlertReceiveChannel
|
||||
from apps.alerts.paging import DirectPagingUserTeamValidationError, UserNotifications, direct_paging, user_is_oncall
|
||||
from apps.schedules.ical_utils import get_oncall_users_for_multiple_schedules
|
||||
from apps.schedules.ical_utils import get_cached_oncall_users_for_multiple_schedules
|
||||
from apps.slack.constants import DIVIDER, PRIVATE_METADATA_MAX_LENGTH
|
||||
from apps.slack.errors import SlackAPIChannelNotFoundError
|
||||
from apps.slack.scenarios import scenario_step
|
||||
|
|
@ -707,7 +707,7 @@ def _get_user_select_blocks(
|
|||
def _get_users_select(
|
||||
organization: "Organization", input_id_prefix: str, action_id: str, max_options_per_group=MAX_STATIC_SELECT_OPTIONS
|
||||
) -> Block.Context | Block.Input:
|
||||
schedules = get_oncall_users_for_multiple_schedules(organization.oncall_schedules.all())
|
||||
schedules = get_cached_oncall_users_for_multiple_schedules(organization.oncall_schedules.all())
|
||||
oncall_user_pks = {user.pk for _, users in schedules.items() for user in users}
|
||||
|
||||
oncall_user_option_groups = _create_user_option_groups(
|
||||
|
|
|
|||
|
|
@ -79,7 +79,7 @@ const AddRespondersPopup = observer(
|
|||
}
|
||||
setVisible(false);
|
||||
},
|
||||
[isCreateMode, userStore, directPagingStore, setShowUserConfirmationModal, setVisible]
|
||||
[isCreateMode, setShowUserConfirmationModal, setVisible]
|
||||
);
|
||||
|
||||
const addTeamResponder = useCallback(
|
||||
|
|
@ -97,8 +97,11 @@ const AddRespondersPopup = observer(
|
|||
);
|
||||
|
||||
  const searchForUsers = useCallback(async () => {
    /**
     * specifying is_currently_oncall=all will tell the backend not to paginate the results,
     * so the response is a plain array of users rather than a paginated envelope
     */
    const userResults = await userStore.search<UserCurrentlyOnCall[]>({ searchTerm, is_currently_oncall: 'all' });
    setUserSearchResults(userResults);
  }, [searchTerm]);
|
||||
|
||||
const searchForTeams = useCallback(async () => {
|
||||
|
|
|
|||
|
|
@ -116,10 +116,13 @@ export class UserStore extends BaseStore {
|
|||
delete this.itemsCurrentlyUpdating[userPk];
|
||||
}
|
||||
|
||||
  /**
   * Search users by term (plus arbitrary extra filters forwarded as query params).
   *
   * NOTE: if is_currently_oncall=all the backend will not paginate the results, it will send back an array of ALL users
   */
  async search<RT = PaginatedUsersResponse<User>>(f: any = { searchTerm: '' }, page = 1): Promise<RT> {
    const filters = typeof f === 'string' ? { searchTerm: f } : f; // for GSelect compatibility
    const { searchTerm: search, ...restFilters } = filters;
    return makeRequest<RT>(this.path, {
      params: { search, page, ...restFilters },
    });
  }
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue