diff --git a/.github/ISSUE_TEMPLATE/issue-template.md b/.github/ISSUE_TEMPLATE/issue-template.md index c6fc0f94..f65de799 100644 --- a/.github/ISSUE_TEMPLATE/issue-template.md +++ b/.github/ISSUE_TEMPLATE/issue-template.md @@ -2,17 +2,23 @@ name: General Issue about: General requirements to all issues. title: Specific issue name -labels: '' -assignees: '' - +labels: "" +assignees: "" --- -Hi, thank you for opening an issue! +`` -Here is a quick checklist: +Hi 👋, thank you for opening an issue! -- [ ] Is it about Cloud or Open Source OnCall? +Please make sure to add the following info to the issue description: + +- [ ] Mention if it's about Cloud or Open Source OnCall. - [ ] Add OnCall backend & frontend versions. -- [ ] Include labels starting with "part:". Like `part:alertflow` or `part:schedules`. +- [ ] Include labels starting with "part:". Like `part:alertflow` or `part:schedules`. Search for all `part:` labels and + choose the closest one. - [ ] Include labels like `bug` or `feature request`. -- [ ] If it's a bug, include logs. +- [ ] If it's a bug, include logs, screenshots, videos. As much specific info as possible. + +Issues missing those items will be closed. + +`` diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 0fab1e81..0c858069 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -4,6 +4,6 @@ ## Checklist -- [ ] Tests updated -- [ ] Documentation added -- [ ] `CHANGELOG.md` updated +- [ ] Unit, integration, and e2e (if applicable) tests updated +- [ ] Documentation added (or `pr:no public docs` PR label added if not required) +- [ ] `CHANGELOG.md` updated (or `pr:no changelog` PR label added if not required) diff --git a/CHANGELOG.md b/CHANGELOG.md index 628fd815..1e850878 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,29 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## v1.2.2 (2023-03-27) + +### Changed + +- Drawers with Forms are not closing by clicking outside of the drawer. Only by clicking Cancel or X (by @Ukochka in [#1608](https://github.com/grafana/oncall/pull/1608)) +- When the `DANGEROUS_WEBHOOKS_ENABLED` environment variable is set to true, it's possible now to create Outgoing Webhooks + using URLs without a top-level domain (by @hoptical in [#1398](https://github.com/grafana/oncall/pull/1398)) +- Updated wording when creating an integration (by @callmehyde in [#1572](https://github.com/grafana/oncall/pull/1572)) +- Set FCM iOS/Android "message priority" to "high priority" for mobile app push notifications (by @joeyorlando in [#1612](https://github.com/grafana/oncall/pull/1612)) +- Improve schedule quality feature (by @vadimkerr in [#1602](https://github.com/grafana/oncall/pull/1602)) + +### Fixed + +- Update override deletion changes to set its final duration (by @matiasb in [#1599](https://github.com/grafana/oncall/pull/1599)) + +## v1.2.1 (2023-03-23) + +### Changed + +- Mobile app settings backend by @vadimkerr in ([1571](https://github.com/grafana/oncall/pull/1571)) +- Fix integrations and escalations autoselect, improve GList by @maskin25 in ([1601](https://github.com/grafana/oncall/pull/1601)) +- Add filters to outgoing webhooks 2 by @iskhakov in ([1598](https://github.com/grafana/oncall/pull/1598)) + ## v1.2.0 (2023-03-21) ### Changed diff --git a/docs/sources/configure-user-settings/_index.md b/docs/sources/configure-user-settings/_index.md index 99f11f23..5b2ea1e4 100644 --- a/docs/sources/configure-user-settings/_index.md +++ b/docs/sources/configure-user-settings/_index.md @@ -46,12 +46,33 @@ To learn more about RBAC for Grafana OnCall, refer to the following documentatio ## Manage Teams in Grafana OnCall -Teams in Grafana OnCall are 
based on the teams created at the organization level of your Grafana instance, -in **Configuration > Teams**. Administrators can create a different configuration for each team, and can navigate -between team configurations in the **Select Team** dropdown menu in the **Alert Group** section of Grafana OnCall. +Teams in Grafana OnCall enable the configuration of visibility and filtering of resources, such as alert groups, +integrations, escalation chains, and schedules. OnCall teams are automatically synced with +[Grafana teams](https://grafana.com/docs/grafana/latest/administration/team-management/) created at the organization +level of your Grafana instance. To modify global settings like team name or team members, navigate to +**Configuration > Teams**. For OnCall-specific team settings, +go to **Alerts and Incidents > OnCall > Settings > Teams and Access Settings**. -Users, including admins, can only view and manage teams in OnCall if they are a member of that team. -An admin user may need to temporarily add themselves to a team to manage it. +This section displays a list of teams, allowing you to configure team visibility and access to team resources for all +Grafana users, or only admins and team members. You can also set a default team, which is a user-specific setting; +the default team will be pre-selected each time a user creates a new resource. The team list includes a `No team` tag, +signifying that the resource has no team and is accessible to everyone. + +Admins can view the list of all teams, while editors and viewers can only see teams (and their resources) +they are members of or if the team setting "who can see the team name and access the team resources" is set to +"all users of Grafana". + +> ⚠️ In the main Grafana teams section, users can set team-specific user permissions, such as Admin, Editor, or Viewer, +> but only for resources within that team. Currently, Grafana OnCall ignores this setting and uses global roles instead. 
+ +Teams help filter resources on their respective pages, improving organization. You can assign a resource to a team when +creating it. Alert groups created via the Integration API inherit the team from the integration. + +Resources from different teams can be connected with one another. For instance, you can create an integration in one +team, set up multiple routes for the integration, and utilize escalation chains from other teams. Users, schedules, +and outgoing webhooks from other teams can also be included in the escalation chain. If a user only has access to the +first team and not others, they will be unable to view the resource, which will display as `🔒 Private resource`. +This feature enables the distribution of escalations across various teams. ## Configure user notification policies diff --git a/docs/sources/open-source/_index.md b/docs/sources/open-source/_index.md index 5d3f981d..870e1201 100644 --- a/docs/sources/open-source/_index.md +++ b/docs/sources/open-source/_index.md @@ -224,15 +224,19 @@ the following env variables with your SMTP server credentials: After enabling the email integration, it will be possible to use the `Notify by email` notification step in user settings. -Grafana OnCall is also capable of creating alert groups from +## Inbound Email Setup + +Grafana OnCall is capable of creating alert groups from [Inbound Email integration]({{< relref "../integrations/available-integrations/configure-inbound-email" >}}). To configure Inbound Email integration for Grafana OnCall OSS populate env variables with your Email Service Provider data: -- `INBOUND_EMAIL_ESP` - Inbound email ESP name. Available options: amazon_ses, mailgun, mailjet, mandrill, postal, postmark, sendgrid, sparkpost +- `INBOUND_EMAIL_ESP` - Inbound email ESP name. 
Available options: `amazon_ses`, `mailgun`, `mailjet`, `mandrill`, `postal`, `postmark`, `sendgrid`, `sparkpost` - `INBOUND_EMAIL_DOMAIN` - Inbound email domain - `INBOUND_EMAIL_WEBHOOK_SECRET` - Inbound email webhook secret +You will also need to configure your ESP to forward messages to the following URL: `/integrations/v1/inbound_email_webhook`. + ## Limits By default, Grafana OnCall limits email and phone notifications (calls, SMS) to 200 per user per day. diff --git a/engine/apps/api/serializers/custom_button.py b/engine/apps/api/serializers/custom_button.py index d0026b9f..853fe28e 100644 --- a/engine/apps/api/serializers/custom_button.py +++ b/engine/apps/api/serializers/custom_button.py @@ -5,8 +5,9 @@ from rest_framework import serializers from rest_framework.validators import UniqueTogetherValidator from apps.alerts.models import CustomButton +from apps.base.utils import live_settings from common.api_helpers.custom_fields import TeamPrimaryKeyRelatedField -from common.api_helpers.utils import CurrentOrganizationDefault, CurrentTeamDefault +from common.api_helpers.utils import CurrentOrganizationDefault, CurrentTeamDefault, URLValidatorWithoutTLD from common.jinja_templater import apply_jinja_template from common.jinja_templater.apply_jinja_template import JinjaTemplateError, JinjaTemplateWarning @@ -41,7 +42,10 @@ class CustomButtonSerializer(serializers.ModelSerializer): def validate_webhook(self, webhook): if webhook: try: - URLValidator()(webhook) + if live_settings.DANGEROUS_WEBHOOKS_ENABLED: + URLValidatorWithoutTLD()(webhook) + else: + URLValidator()(webhook) except ValidationError: raise serializers.ValidationError("Webhook is incorrect") return webhook diff --git a/engine/apps/api/tests/test_custom_button.py b/engine/apps/api/tests/test_custom_button.py index cc6ae317..0c3660eb 100644 --- a/engine/apps/api/tests/test_custom_button.py +++ b/engine/apps/api/tests/test_custom_button.py @@ -11,6 +11,8 @@ from apps.alerts.models import CustomButton 
from apps.api.permissions import LegacyAccessControlRole TEST_URL = "https://amixr.io" +URL_WITH_TLD = "http://www.google.com" +URL_WITHOUT_TLD = "http://container:8080" @pytest.fixture() @@ -457,3 +459,36 @@ def test_get_custom_button_from_other_team_with_flag( response = client.get(url, format="json", **make_user_auth_headers(user, token)) assert response.status_code == status.HTTP_200_OK + + +@pytest.mark.django_db +@pytest.mark.parametrize( + "dangerous_webhooks,webhook_url,expected_status", + [ + (True, URL_WITH_TLD, status.HTTP_201_CREATED), + (True, URL_WITHOUT_TLD, status.HTTP_201_CREATED), + (False, URL_WITH_TLD, status.HTTP_201_CREATED), + (False, URL_WITHOUT_TLD, status.HTTP_400_BAD_REQUEST), + ], +) +def test_url_without_tld_custom_button( + custom_button_internal_api_setup, + make_user_auth_headers, + settings, + dangerous_webhooks, + webhook_url, + expected_status, +): + settings.DANGEROUS_WEBHOOKS_ENABLED = dangerous_webhooks + + user, token, _ = custom_button_internal_api_setup + client = APIClient() + url = reverse("api-internal:custom_button-list") + + data = { + "name": "amixr_button", + "webhook": webhook_url, + "team": None, + } + response = client.post(url, data, format="json", **make_user_auth_headers(user, token)) + assert response.status_code == expected_status diff --git a/engine/apps/api/views/schedule.py b/engine/apps/api/views/schedule.py index b66d28f5..76d8d6ac 100644 --- a/engine/apps/api/views/schedule.py +++ b/engine/apps/api/views/schedule.py @@ -27,7 +27,6 @@ from apps.auth_token.auth import PluginAuthentication from apps.auth_token.constants import SCHEDULE_EXPORT_TOKEN_NAME from apps.auth_token.models import ScheduleExportAuthToken from apps.schedules.models import OnCallSchedule -from apps.schedules.quality_score import get_schedule_quality_score from apps.slack.models import SlackChannel from apps.slack.tasks import update_slack_user_group_for_schedules from common.api_helpers.exceptions import BadRequest, Conflict @@ -353,13 
+352,12 @@ class ScheduleView( @action(detail=True, methods=["get"]) def quality(self, request, pk): schedule = self.get_object() - user_tz, date = self.get_request_timezone() - days = int(self.request.query_params.get("days", 90)) # todo: check if days could be calculated more precisely - events = schedule.filter_events(user_tz, date, days=days, with_empty=True, with_gap=True) + _, date = self.get_request_timezone() + days = self.request.query_params.get("days") + days = int(days) if days else None - schedule_score = get_schedule_quality_score(events, days) - return Response(schedule_score) + return Response(schedule.quality_report(date, days)) @action(detail=False, methods=["get"]) def type_options(self, request): diff --git a/engine/apps/mobile_app/fcm_relay.py b/engine/apps/mobile_app/fcm_relay.py index 3cbc5e64..a8a47201 100644 --- a/engine/apps/mobile_app/fcm_relay.py +++ b/engine/apps/mobile_app/fcm_relay.py @@ -4,7 +4,7 @@ from celery.utils.log import get_task_logger from django.conf import settings from fcm_django.models import FCMDevice from firebase_admin.exceptions import FirebaseError -from firebase_admin.messaging import APNSConfig, APNSPayload, Aps, ApsAlert, CriticalSound, Message +from firebase_admin.messaging import AndroidConfig, APNSConfig, APNSPayload, Aps, ApsAlert, CriticalSound, Message from rest_framework import status from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response @@ -42,18 +42,19 @@ class FCMRelayView(APIView): token = request.data["token"] data = request.data["data"] apns = request.data["apns"] + android = request.data.get("android") # optional except KeyError: return Response(status=status.HTTP_400_BAD_REQUEST) - fcm_relay_async.delay(token=token, data=data, apns=apns) + fcm_relay_async.delay(token=token, data=data, apns=apns, android=android) return Response(status=status.HTTP_200_OK) @shared_dedicated_queue_retry_task( autoretry_for=(Exception,), retry_backoff=True, max_retries=1 if 
settings.DEBUG else 5 ) -def fcm_relay_async(token, data, apns): - message = Message(token=token, data=data, apns=deserialize_apns(apns)) +def fcm_relay_async(token, data, apns, android=None): + message = _get_message_from_request_data(token, data, apns, android) # https://firebase.google.com/docs/cloud-messaging/http-server-ref#interpret-downstream response = FCMDevice(registration_id=token).send_message(message) @@ -63,7 +64,17 @@ def fcm_relay_async(token, data, apns): raise response -def deserialize_apns(apns): +def _get_message_from_request_data(token, data, apns, android): + """ + Create Message object from JSON payload from OSS instance. + """ + + return Message( + token=token, data=data, apns=_deserialize_apns(apns), android=AndroidConfig(**android) if android else None + ) + + +def _deserialize_apns(apns): """ Create APNSConfig object from JSON payload from OSS instance. """ @@ -95,5 +106,6 @@ def deserialize_apns(apns): sound=sound, custom_data=aps, ) - ) + ), + headers=apns.get("headers"), ) diff --git a/engine/apps/mobile_app/tasks.py b/engine/apps/mobile_app/tasks.py index dec51193..76d3d32b 100644 --- a/engine/apps/mobile_app/tasks.py +++ b/engine/apps/mobile_app/tasks.py @@ -6,7 +6,7 @@ from celery.utils.log import get_task_logger from django.conf import settings from fcm_django.models import FCMDevice from firebase_admin.exceptions import FirebaseError -from firebase_admin.messaging import APNSConfig, APNSPayload, Aps, ApsAlert, CriticalSound, Message +from firebase_admin.messaging import AndroidConfig, APNSConfig, APNSPayload, Aps, ApsAlert, CriticalSound, Message from requests import HTTPError from rest_framework import status @@ -185,6 +185,21 @@ def _get_fcm_message(alert_group, user, registration_id, critical): mobile_app_user_settings.important_notification_override_dnd ), }, + android=AndroidConfig( + # from the docs + # https://firebase.google.com/docs/cloud-messaging/concept-options#setting-the-priority-of-a-message + # + # Normal priority. 
+ # Normal priority messages are delivered immediately when the app is in the foreground. + # For backgrounded apps, delivery may be delayed. For less time-sensitive messages, such as notifications + # of new email, keeping your UI in sync, or syncing app data in the background, choose normal delivery + # priority. + # + # High priority. + # FCM attempts to deliver high priority messages immediately even if the device is in Doze mode. + # High priority messages are for time-sensitive, user visible content. + priority="high", + ), apns=APNSConfig( payload=APNSPayload( aps=Aps( @@ -202,5 +217,10 @@ def _get_fcm_message(alert_group, user, registration_id, critical): }, ), ), + headers={ + # From the docs + # https://firebase.google.com/docs/cloud-messaging/concept-options#setting-the-priority-of-a-message + "apns-priority": "10", + }, ), ) diff --git a/engine/apps/mobile_app/tests/test_fcm_relay.py b/engine/apps/mobile_app/tests/test_fcm_relay.py index 7cbfd930..47634617 100644 --- a/engine/apps/mobile_app/tests/test_fcm_relay.py +++ b/engine/apps/mobile_app/tests/test_fcm_relay.py @@ -1,3 +1,4 @@ +import json from unittest.mock import patch import pytest @@ -7,7 +8,8 @@ from firebase_admin.exceptions import FirebaseError from rest_framework import status from rest_framework.test import APIClient -from apps.mobile_app.fcm_relay import FCMRelayThrottler, fcm_relay_async +from apps.mobile_app.fcm_relay import FCMRelayThrottler, _get_message_from_request_data, fcm_relay_async +from apps.mobile_app.tasks import _get_fcm_message @pytest.mark.django_db @@ -88,3 +90,41 @@ def test_fcm_relay_async_retry(): ): with pytest.raises(FirebaseError): fcm_relay_async(token="test_token", data={}, apns={}) + + +def test_get_message_from_request_data(): + token = "test_token" + data = {"test_data_key": "test_data_value"} + apns = {"headers": {"apns-priority": "10"}, "payload": {"aps": {"thread-id": "test_thread_id"}}} + android = {"priority": "high"} + message = 
_get_message_from_request_data(token, data, apns, android) + + assert message.token == "test_token" + assert message.data == {"test_data_key": "test_data_value"} + assert message.apns.headers == {"apns-priority": "10"} + assert message.apns.payload.aps.thread_id == "test_thread_id" + assert message.android.priority == "high" + + +@pytest.mark.django_db +def test_fcm_relay_serialize_deserialize( + make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert +): + organization, user = make_organization_and_user() + device = FCMDevice.objects.create(user=user, registration_id="test_device_id") + + alert_receive_channel = make_alert_receive_channel(organization=organization) + alert_group = make_alert_group(alert_receive_channel) + make_alert(alert_group=alert_group, raw_request_data={}) + + # Imitate sending a message to the FCM relay endpoint + original_message = _get_fcm_message(alert_group, user, device.registration_id, critical=False) + request_data = json.loads(str(original_message)) + + # Imitate receiving a message from the FCM relay endpoint + relayed_message = _get_message_from_request_data( + request_data["token"], request_data["data"], request_data["apns"], request_data["android"] + ) + + # Check that the message is the same after serialization and deserialization + assert json.loads(str(original_message)) == json.loads(str(relayed_message)) diff --git a/engine/apps/schedules/models/custom_on_call_shift.py b/engine/apps/schedules/models/custom_on_call_shift.py index ec1c90c8..cbba2bb6 100644 --- a/engine/apps/schedules/models/custom_on_call_shift.py +++ b/engine/apps/schedules/models/custom_on_call_shift.py @@ -223,8 +223,19 @@ class CustomOnCallShift(models.Model): force = kwargs.pop("force", False) # do soft delete for started shifts that were created for web schedule if self.schedule and self.event_is_started and not force: - self.until = timezone.now().replace(microsecond=0) - self.save(update_fields=["until"]) + updated_until = 
timezone.now().replace(microsecond=0) + if self.until is not None and updated_until >= self.until: + # event is already finished + return + self.until = updated_until + update_fields = ["until"] + if self.type == self.TYPE_OVERRIDE: + # since it is a single-time event, update override duration + delta = self.until - self.start + if delta < self.duration: + self.duration = delta + update_fields += ["duration"] + self.save(update_fields=update_fields) else: super().delete(*args, **kwargs) diff --git a/engine/apps/schedules/models/on_call_schedule.py b/engine/apps/schedules/models/on_call_schedule.py index 4e01ef61..f1ddb17f 100644 --- a/engine/apps/schedules/models/on_call_schedule.py +++ b/engine/apps/schedules/models/on_call_schedule.py @@ -1,6 +1,9 @@ import datetime import functools import itertools +from collections import defaultdict +from enum import Enum +from typing import Iterable, Optional, TypedDict import icalendar import pytz @@ -23,9 +26,33 @@ from apps.schedules.ical_utils import ( list_of_oncall_shifts_from_ical, ) from apps.schedules.models import CustomOnCallShift +from apps.user_management.models import User from common.public_primary_keys import generate_public_primary_key, increase_public_primary_key_length +# Utility classes for schedule quality report +class QualityReportCommentType(str, Enum): + INFO = "info" + WARNING = "warning" + + +class QualityReportComment(TypedDict): + type: QualityReportCommentType + text: str + + +class QualityReportOverloadedUser(TypedDict): + id: str + username: str + score: int + + +class QualityReport(TypedDict): + total_score: int + comments: list[QualityReportComment] + overloaded_users: list[QualityReportOverloadedUser] + + def generate_public_primary_key_for_oncall_schedule_channel(): prefix = "S" new_public_primary_key = generate_public_primary_key(prefix) @@ -256,6 +283,126 @@ class OnCallSchedule(PolymorphicModel): events = self._resolve_schedule(events) return events + def quality_report(self, date: 
Optional[timezone.datetime], days: Optional[int]) -> QualityReport: + """ + Return schedule quality report to be used by the web UI. + TODO: Add scores on "inside working hours" and "balance outside working hours" when + TODO: working hours editor is implemented in the web UI. + """ + # get events to consider for calculation + if date is None: + today = datetime.datetime.now(tz=datetime.timezone.utc) + date = today - datetime.timedelta(days=7 - today.weekday()) # start of next week in UTC + if days is None: + days = 52 * 7 # consider next 52 weeks (~1 year) + + events = self.final_events(user_tz="UTC", starting_date=date, days=days) + + # an event is “good” if it's not a gap and not empty + good_events = [event for event in events if not event["is_gap"] and not event["is_empty"]] + if not good_events: + return { + "total_score": 0, + "comments": [{"type": QualityReportCommentType.WARNING, "text": "Schedule is empty"}], + "overloaded_users": [], + } + + def event_duration(ev: dict) -> datetime.timedelta: + return ev["end"] - ev["start"] + + def timedelta_sum(deltas: Iterable[datetime.timedelta]) -> datetime.timedelta: + return sum(deltas, start=datetime.timedelta()) + + def score_to_percent(value: float) -> int: + return round(value * 100) + + def get_duration_map(evs: list[dict]) -> dict[str, datetime.timedelta]: + """Return a map of user PKs to total duration of events they are in.""" + result = defaultdict(datetime.timedelta) + for ev in evs: + for user in ev["users"]: + user_pk = user["pk"] + result[user_pk] += event_duration(ev) + + return result + + def get_balance_score_by_duration_map(dur_map: dict[str, datetime.timedelta]) -> float: + """ + Return a score between 0 and 1, based on how balanced the durations are in the duration map. + The formula is taken from https://github.com/grafana/oncall/issues/118#issuecomment-1161787854. 
+ """ + if len(dur_map) <= 1: + return 1 + + result = 0 + for key_1, key_2 in itertools.combinations(dur_map, 2): + duration_1 = dur_map[key_1] + duration_2 = dur_map[key_2] + + result += min(duration_1, duration_2) / max(duration_1, duration_2) + + number_of_pairs = len(dur_map) * (len(dur_map) - 1) // 2 + return result / number_of_pairs + + # calculate good event score + good_events_duration = timedelta_sum(event_duration(event) for event in good_events) + good_event_score = min(good_events_duration / datetime.timedelta(days=days), 1) + good_event_score = score_to_percent(good_event_score) + + # calculate balance score + duration_map = get_duration_map(good_events) + balance_score = get_balance_score_by_duration_map(duration_map) + balance_score = score_to_percent(balance_score) + + # calculate overloaded users + if balance_score >= 95: # tolerate minor imbalance + balance_score = 100 + overloaded_users = [] + else: + average_duration = timedelta_sum(duration_map.values()) / len(duration_map) + overloaded_user_pks = [user_pk for user_pk, duration in duration_map.items() if duration > average_duration] + usernames = { + u.public_primary_key: u.username + for u in User.objects.filter(public_primary_key__in=overloaded_user_pks).only( + "public_primary_key", "username" + ) + } + overloaded_users = [] + for user_pk in overloaded_user_pks: + score = score_to_percent(duration_map[user_pk] / average_duration) - 100 + username = usernames.get(user_pk) or "unknown" # fallback to "unknown" if user is not found + overloaded_users.append({"id": user_pk, "username": username, "score": score}) + + # show most overloaded users first + overloaded_users.sort(key=lambda u: (-u["score"], u["username"])) + + # generate comments regarding gaps + comments = [] + if good_event_score == 100: + comments.append({"type": QualityReportCommentType.INFO, "text": "Schedule has no gaps"}) + else: + not_covered = 100 - good_event_score + comments.append( + {"type": 
QualityReportCommentType.WARNING, "text": f"Schedule has gaps ({not_covered}% not covered)"} + ) + + # generate comments regarding balance + if balance_score == 100: + comments.append({"type": QualityReportCommentType.INFO, "text": "Schedule is perfectly balanced"}) + else: + comments.append( + {"type": QualityReportCommentType.WARNING, "text": "Schedule has balance issues (see overloaded users)"} + ) + + # calculate total score (weighted sum of good event score and balance score) + total_score = round((good_event_score + balance_score) / 2) + + return { + "total_score": total_score, + "comments": comments, + "overloaded_users": overloaded_users, + } + def _resolve_schedule(self, events): """Calculate final schedule shifts considering rotations and overrides.""" if not events: diff --git a/engine/apps/schedules/quality_score.py b/engine/apps/schedules/quality_score.py deleted file mode 100644 index b152e0cc..00000000 --- a/engine/apps/schedules/quality_score.py +++ /dev/null @@ -1,117 +0,0 @@ -import datetime -import enum -import itertools -from collections import defaultdict -from typing import Iterable, Union - -import pytz - - -class CommentType(str, enum.Enum): - INFO = "info" - WARNING = "warning" - - -# TODO: add "inside working hours score" and "balance outside working hours score" when working hours editor is implemented -def get_schedule_quality_score(events: list[dict], days: int) -> dict: - # an event is “good” if it's a primary event, not a gap and not empty - good_events = [ - event for event in events if not event["is_override"] and not event["is_gap"] and not event["is_empty"] - ] - good_event_score = get_good_event_score(good_events, days) - - # formula for balance score is taken from here: https://github.com/grafana/oncall/issues/118 - balance_score, overloaded_users = get_balance_score(good_events) - - if events: - total_score = (good_event_score + balance_score) / 2 - else: - total_score = 0 - - comments = [] - if good_event_score < 1: - 
comments.append({"type": CommentType.WARNING, "text": "Schedule has gaps"}) - else: - comments.append({"type": CommentType.INFO, "text": "Schedule has no gaps"}) - - if balance_score < 0.8: - comments.append({"type": CommentType.WARNING, "text": "Schedule has balance issues"}) - elif 0.8 <= balance_score < 1: - comments.append({"type": CommentType.INFO, "text": "Schedule is well-balanced, but still can be improved"}) - else: - comments.append({"type": CommentType.INFO, "text": "Schedule is perfectly balanced"}) - - return { - "total_score": score_to_percent(total_score), - "comments": comments, - "overloaded_users": overloaded_users, - } - - -def get_good_event_score(good_events: list[dict], days: int) -> float: - good_events_duration = timedelta_sum(event_duration(event) for event in good_events) - good_event_score = min( - good_events_duration / datetime.timedelta(days=days), 1 - ) # todo: deal with overlapping events - - return good_event_score - - -def get_balance_score(events: list[dict]) -> tuple[float, list[str]]: - duration_map = defaultdict(datetime.timedelta) - for event in events: - for user in event["users"]: - user_pk = user["pk"] - duration_map[user_pk] += event_duration(event) - - if len(duration_map) == 0: - return 1, [] - - average_duration = timedelta_sum(duration_map.values()) / len(duration_map) - overloaded_users = [user_pk for user_pk, duration in duration_map.items() if duration > average_duration] - - return get_balance_score_by_duration_map(duration_map), overloaded_users - - -def get_balance_score_by_duration_map(duration_map: dict[str, datetime.timedelta]) -> float: - if len(duration_map) <= 1: - return 1 - - score = 0 - for key_1, key_2 in itertools.combinations(duration_map, 2): - duration_1 = duration_map[key_1] - duration_2 = duration_map[key_2] - - score += min(duration_1, duration_2) / max(duration_1, duration_2) - - number_of_pairs = len(duration_map) * (len(duration_map) - 1) // 2 - balance_score = score / number_of_pairs - return 
balance_score - - -def get_day_start(dt: Union[datetime.datetime, datetime.date]) -> datetime.datetime: - return datetime.datetime.combine(dt, datetime.datetime.min.time(), tzinfo=pytz.UTC) - - -def get_day_end(dt: Union[datetime.datetime, datetime.date]) -> datetime.datetime: - return datetime.datetime.combine(dt, datetime.datetime.max.time(), tzinfo=pytz.UTC) - - -def event_duration(event: dict) -> datetime.timedelta: - start = event["start"] - end = event["end"] - - if event["all_day"]: - start = get_day_start(start) - # adding one microsecond to the end datetime to make sure 1 day-long events are really 1 day long - end = get_day_end(end) + datetime.timedelta(microseconds=1) - - return end - start - - -def timedelta_sum(deltas: Iterable[datetime.timedelta]) -> datetime.timedelta: - return sum(deltas, start=datetime.timedelta()) - - -def score_to_percent(score: float) -> int: - return round(score * 100) diff --git a/engine/apps/schedules/tests/test_custom_on_call_shift.py b/engine/apps/schedules/tests/test_custom_on_call_shift.py index fc0efc96..f21674f4 100644 --- a/engine/apps/schedules/tests/test_custom_on_call_shift.py +++ b/engine/apps/schedules/tests/test_custom_on_call_shift.py @@ -1585,3 +1585,43 @@ def test_delete_shift(make_organization_and_user, make_schedule, make_on_call_sh else: on_call_shift.refresh_from_db() assert on_call_shift.until is not None + + +@pytest.mark.django_db +@pytest.mark.parametrize( + "starting_day,duration,deleted", + [ + (-1, 2, False), + (-2, 1, False), + (1, 1, True), + ], +) +def test_delete_override( + make_organization_and_user, make_schedule, make_on_call_shift, starting_day, duration, deleted +): + organization, _ = make_organization_and_user() + schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb) + start_date = (timezone.now() + timezone.timedelta(days=starting_day)).replace(microsecond=0) + + data = { + "start": start_date, + "rotation_start": start_date, + "duration": 
timezone.timedelta(days=duration), + "schedule": schedule, + } + on_call_shift = make_on_call_shift(organization=organization, shift_type=CustomOnCallShift.TYPE_OVERRIDE, **data) + original_duration = on_call_shift.duration + + on_call_shift.delete() + + if deleted: + with pytest.raises(CustomOnCallShift.DoesNotExist): + on_call_shift.refresh_from_db() + else: + on_call_shift.refresh_from_db() + assert on_call_shift.until is not None + assert ( + on_call_shift.duration == original_duration + if (starting_day + duration) < 0 + else on_call_shift.duration < original_duration + ) diff --git a/engine/apps/schedules/tests/test_quality_score.py b/engine/apps/schedules/tests/test_quality_score.py index 4d313c27..5926f929 100644 --- a/engine/apps/schedules/tests/test_quality_score.py +++ b/engine/apps/schedules/tests/test_quality_score.py @@ -1,10 +1,12 @@ +import datetime + import pytest from rest_framework import status from rest_framework.reverse import reverse from rest_framework.test import APIClient from apps.schedules.ical_utils import memoized_users_in_ical -from apps.schedules.models import OnCallScheduleICal +from apps.schedules.models import CustomOnCallShift, OnCallScheduleICal, OnCallScheduleWeb @pytest.fixture @@ -54,8 +56,7 @@ def test_get_schedule_score_no_events(get_schedule_quality_response): assert response.json() == { "total_score": 0, "comments": [ - {"type": "warning", "text": "Schedule has gaps"}, - {"type": "info", "text": "Schedule is perfectly balanced"}, + {"type": "warning", "text": "Schedule is empty"}, ], "overloaded_users": [], } @@ -67,12 +68,18 @@ def test_get_schedule_score_09_05(get_schedule_quality_response): assert response.status_code == status.HTTP_200_OK assert response.json() == { - "total_score": 27, + "total_score": 28, "comments": [ - {"type": "warning", "text": "Schedule has gaps"}, - {"type": "warning", "text": "Schedule has balance issues"}, + {"type": "warning", "text": "Schedule has gaps (79% not covered)"}, + {"type": 
"warning", "text": "Schedule has balance issues (see overloaded users)"}, + ], + "overloaded_users": [ + { + "id": user1.public_primary_key, + "username": user1.username, + "score": 49, + }, ], - "overloaded_users": [user1.public_primary_key], } @@ -84,10 +91,16 @@ def test_get_schedule_score_09_09(get_schedule_quality_response): assert response.json() == { "total_score": 51, "comments": [ - {"type": "warning", "text": "Schedule has gaps"}, - {"type": "info", "text": "Schedule is well-balanced, but still can be improved"}, + {"type": "warning", "text": "Schedule has gaps (81% not covered)"}, + {"type": "warning", "text": "Schedule has balance issues (see overloaded users)"}, + ], + "overloaded_users": [ + { + "id": user2.public_primary_key, + "username": user2.username, + "score": 9, + }, ], - "overloaded_users": [user2.public_primary_key], } @@ -113,8 +126,233 @@ def test_get_schedule_score_09_19(get_schedule_quality_response): assert response.json() == { "total_score": 70, "comments": [ - {"type": "warning", "text": "Schedule has gaps"}, + {"type": "warning", "text": "Schedule has gaps (59% not covered)"}, {"type": "info", "text": "Schedule is perfectly balanced"}, ], "overloaded_users": [], } + + +@pytest.mark.django_db +def test_get_schedule_score_weekdays( + make_organization, + make_user_for_organization, + make_token_for_organization, + make_schedule, + make_on_call_shift, + make_user_auth_headers, +): + organization = make_organization() + _, token = make_token_for_organization(organization) + + schedule = make_schedule( + organization, + schedule_class=OnCallScheduleWeb, + name="test_quality", + ) + + users = [make_user_for_organization(organization, username=f"user-{idx}") for idx in range(8)] + # clear users pks <-> organization cache (persisting between tests) + memoized_users_in_ical.cache_clear() + + make_on_call_shift( + schedule.organization, + shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, + schedule=schedule, + 
start=datetime.datetime(2022, 3, 20, 0, 0, 0, tzinfo=datetime.timezone.utc), + duration=datetime.timedelta(hours=12), + rotation_start=datetime.datetime(2022, 3, 20, 0, 0, 0, tzinfo=datetime.timezone.utc), + until=None, + rolling_users=[{user.pk: user.public_primary_key for user in users[:4]}], + frequency=CustomOnCallShift.FREQUENCY_WEEKLY, + by_day=["MO", "TU", "WE", "TH", "FR"], + ) + + make_on_call_shift( + schedule.organization, + shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, + schedule=schedule, + start=datetime.datetime(2022, 3, 20, 12, 0, 0, tzinfo=datetime.timezone.utc), + duration=datetime.timedelta(hours=12), + rotation_start=datetime.datetime(2022, 3, 20, 12, 0, 0, tzinfo=datetime.timezone.utc), + until=None, + rolling_users=[{user.pk: user.public_primary_key for user in users[4:]}], + frequency=CustomOnCallShift.FREQUENCY_WEEKLY, + by_day=["MO", "TU", "WE", "TH", "FR"], + ) + + client = APIClient() + + url = reverse("api-internal:schedule-quality", kwargs={"pk": schedule.public_primary_key}) + "?date=2022-03-24" + response = client.get(url, **make_user_auth_headers(users[0], token)) + + assert response.status_code == status.HTTP_200_OK + assert response.json() == { + "total_score": 86, + "comments": [ + {"type": "warning", "text": "Schedule has gaps (29% not covered)"}, + {"type": "info", "text": "Schedule is perfectly balanced"}, + ], + "overloaded_users": [], + } + + +@pytest.mark.django_db +def test_get_schedule_score_all_week( + make_organization, + make_user_for_organization, + make_token_for_organization, + make_schedule, + make_on_call_shift, + make_user_auth_headers, +): + organization = make_organization() + _, token = make_token_for_organization(organization) + + schedule = make_schedule( + organization, + schedule_class=OnCallScheduleWeb, + name="test_quality", + ) + + users = [make_user_for_organization(organization, username=f"user-{idx}") for idx in range(8)] + # clear users pks <-> organization cache (persisting between tests) + 
memoized_users_in_ical.cache_clear() + + make_on_call_shift( + schedule.organization, + shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, + schedule=schedule, + start=datetime.datetime(2022, 3, 20, 0, 0, 0, tzinfo=datetime.timezone.utc), + duration=datetime.timedelta(hours=12), + rotation_start=datetime.datetime(2022, 3, 20, 0, 0, 0, tzinfo=datetime.timezone.utc), + until=None, + rolling_users=[{user.pk: user.public_primary_key for user in users[:4]}], + frequency=CustomOnCallShift.FREQUENCY_WEEKLY, + by_day=["MO", "TU", "WE", "TH", "FR"], + ) + + make_on_call_shift( + schedule.organization, + shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, + schedule=schedule, + start=datetime.datetime(2022, 3, 20, 12, 0, 0, tzinfo=datetime.timezone.utc), + duration=datetime.timedelta(hours=12), + rotation_start=datetime.datetime(2022, 3, 20, 12, 0, 0, tzinfo=datetime.timezone.utc), + until=None, + rolling_users=[{user.pk: user.public_primary_key for user in users[4:]}], + frequency=CustomOnCallShift.FREQUENCY_WEEKLY, + by_day=["MO", "TU", "WE", "TH", "FR"], + ) + + make_on_call_shift( + schedule.organization, + shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, + schedule=schedule, + start=datetime.datetime(2022, 3, 20, 12, 0, 0, tzinfo=datetime.timezone.utc), + duration=datetime.timedelta(hours=24), + rotation_start=datetime.datetime(2022, 3, 20, 12, 0, 0, tzinfo=datetime.timezone.utc), + until=None, + rolling_users=[{user.pk: user.public_primary_key for user in users}], + frequency=CustomOnCallShift.FREQUENCY_WEEKLY, + by_day=["SA", "SU"], + ) + + client = APIClient() + + url = reverse("api-internal:schedule-quality", kwargs={"pk": schedule.public_primary_key}) + "?date=2022-03-24" + response = client.get(url, **make_user_auth_headers(users[0], token)) + + assert response.status_code == status.HTTP_200_OK + assert response.json() == { + "total_score": 100, + "comments": [ + {"type": "info", "text": "Schedule has no gaps"}, + {"type": "info", "text": "Schedule is 
perfectly balanced"}, + ], + "overloaded_users": [], + } + + +@pytest.mark.django_db +def test_get_schedule_score_all_week_imbalanced_weekends( + make_organization, + make_user_for_organization, + make_token_for_organization, + make_schedule, + make_on_call_shift, + make_user_auth_headers, +): + organization = make_organization() + _, token = make_token_for_organization(organization) + + schedule = make_schedule( + organization, + schedule_class=OnCallScheduleWeb, + name="test_quality", + ) + + users = [make_user_for_organization(organization, username=f"user-{idx}") for idx in range(8)] + # clear users pks <-> organization cache (persisting between tests) + memoized_users_in_ical.cache_clear() + + make_on_call_shift( + schedule.organization, + shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, + schedule=schedule, + start=datetime.datetime(2022, 3, 20, 0, 0, 0, tzinfo=datetime.timezone.utc), + duration=datetime.timedelta(hours=12), + rotation_start=datetime.datetime(2022, 3, 20, 0, 0, 0, tzinfo=datetime.timezone.utc), + until=None, + rolling_users=[{user.pk: user.public_primary_key for user in users[:4]}], + frequency=CustomOnCallShift.FREQUENCY_WEEKLY, + by_day=["MO", "TU", "WE", "TH", "FR"], + ) + + make_on_call_shift( + schedule.organization, + shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, + schedule=schedule, + start=datetime.datetime(2022, 3, 20, 12, 0, 0, tzinfo=datetime.timezone.utc), + duration=datetime.timedelta(hours=12), + rotation_start=datetime.datetime(2022, 3, 20, 12, 0, 0, tzinfo=datetime.timezone.utc), + until=None, + rolling_users=[{user.pk: user.public_primary_key for user in users[4:]}], + frequency=CustomOnCallShift.FREQUENCY_WEEKLY, + by_day=["MO", "TU", "WE", "TH", "FR"], + ) + + make_on_call_shift( + schedule.organization, + shift_type=CustomOnCallShift.TYPE_ROLLING_USERS_EVENT, + schedule=schedule, + start=datetime.datetime(2022, 3, 20, 12, 0, 0, tzinfo=datetime.timezone.utc), + duration=datetime.timedelta(hours=24), + 
rotation_start=datetime.datetime(2022, 3, 20, 12, 0, 0, tzinfo=datetime.timezone.utc), + until=None, + rolling_users=[{user.pk: user.public_primary_key for user in users[:4]}], + frequency=CustomOnCallShift.FREQUENCY_WEEKLY, + by_day=["SA", "SU"], + ) + + client = APIClient() + + url = reverse("api-internal:schedule-quality", kwargs={"pk": schedule.public_primary_key}) + "?date=2022-03-24" + response = client.get(url, **make_user_auth_headers(users[0], token)) + + assert response.status_code == status.HTTP_200_OK + assert response.json() == { + "total_score": 88, + "comments": [ + {"type": "info", "text": "Schedule has no gaps"}, + {"type": "warning", "text": "Schedule has balance issues (see overloaded users)"}, + ], + "overloaded_users": [ + { + "id": user.public_primary_key, + "username": user.username, + "score": 29, + } + for user in users[:4] + ], + } diff --git a/engine/common/api_helpers/utils.py b/engine/common/api_helpers/utils.py index c82f64af..cc805ccc 100644 --- a/engine/common/api_helpers/utils.py +++ b/engine/common/api_helpers/utils.py @@ -1,9 +1,12 @@ import datetime +import re from urllib.parse import urljoin import requests from django.conf import settings +from django.core.validators import URLValidator from django.utils import dateparse, timezone +from django.utils.regex_helper import _lazy_re_compile from icalendar import Calendar from rest_framework import serializers @@ -45,6 +48,34 @@ class CurrentTeamDefault: return "%s()" % self.__class__.__name__ +class URLValidatorWithoutTLD(URLValidator): + """ + Overrides Django URLValidator Regex. It removes the tld part because + most of the time, containers don't have any TLD in their urls and such outgoing webhooks + can't be registered. 
+ """ + + host_re = ( + "(" + + URLValidator.hostname_re + + URLValidator.domain_re + + URLValidator.tld_re + + "|" + + URLValidator.hostname_re + + "|localhost)" + ) + + regex = _lazy_re_compile( + r"^(?:[a-z0-9.+-]*)://" # scheme is validated separately + r"(?:[^\s:@/]+(?::[^\s:@/]*)?@)?" # user:pass authentication + r"(?:" + URLValidator.ipv4_re + "|" + URLValidator.ipv6_re + "|" + host_re + ")" + r"(?::[0-9]{1,5})?" # port + r"(?:[/?#][^\s]*)?" # resource path + r"\Z", + re.IGNORECASE, + ) + + class CurrentUserDefault: """ Utility class to get the current user right from the serializer field. diff --git a/engine/common/tests/test_urlvalidator_without_tld.py b/engine/common/tests/test_urlvalidator_without_tld.py new file mode 100644 index 00000000..7df900cd --- /dev/null +++ b/engine/common/tests/test_urlvalidator_without_tld.py @@ -0,0 +1,20 @@ +import pytest +from django.core.validators import ValidationError + +from common.api_helpers.utils import URLValidatorWithoutTLD + +valid_urls = ["https://www.google.com", "https://www.google", "http://conatainer1"] +invalid_urls = ["https:/www.google.com", "htt://www.google.com/"] + + +@pytest.mark.parametrize("url", valid_urls) +def test_urlvalidator_without_tld_valid_urls(url): + # Test valid URLs + URLValidatorWithoutTLD()(url) + + +@pytest.mark.parametrize("url", invalid_urls) +def test_urlvalidator_without_tld_invalid_urls(url): + # Test an invalid URL + with pytest.raises(ValidationError): + URLValidatorWithoutTLD()(url) diff --git a/engine/engine/wsgi.py b/engine/engine/wsgi.py index c95992be..1642bd9d 100644 --- a/engine/engine/wsgi.py +++ b/engine/engine/wsgi.py @@ -40,3 +40,22 @@ if settings.OTEL_TRACING_ENABLED and settings.OTEL_EXPORTER_OTLP_ENDPOINT: except ModuleNotFoundError: # Only works under uwsgi web server environment pass + +if settings.PYROSCOPE_PROFILER_ENABLED: + try: + import pyroscope + from uwsgidecorators import postfork + + @postfork + def init_pyroscope(): + pyroscope.configure( + 
application_name=settings.PYROSCOPE_APPLICATION_NAME, + server_address=settings.PYROSCOPE_SERVER_ADDRESS, + auth_token=settings.PYROSCOPE_AUTH_TOKEN, + detect_subprocesses=True, + tags={"celery_worker": settings.PYROSCOPE_CELERY_WORKER_QUEUE}, + ) + + except ModuleNotFoundError: + # Only works under uwsgi web server environment + pass diff --git a/engine/settings/base.py b/engine/settings/base.py index c647fb51..97a8fee0 100644 --- a/engine/settings/base.py +++ b/engine/settings/base.py @@ -678,15 +678,7 @@ MIGRATION_LINTER_OPTIONS = {"exclude_apps": ["social_django", "silk", "fcm_djang MIGRATION_LINTER_OVERRIDE_MAKEMIGRATIONS = True PYROSCOPE_PROFILER_ENABLED = getenv_boolean("PYROSCOPE_PROFILER_ENABLED", default=False) -if PYROSCOPE_PROFILER_ENABLED: - import pyroscope - - pyroscope.configure( - application_name=os.getenv("PYROSCOPE_APPLICATION_NAME", "oncall"), - server_address=os.getenv("PYROSCOPE_SERVER_ADDRESS", "http://pyroscope:4040"), - auth_token=os.getenv("PYROSCOPE_AUTH_TOKEN", ""), - detect_subprocesses=True, - tags={ - "celery_worker": os.getenv("CELERY_WORKER_QUEUE", None), - }, - ) +PYROSCOPE_APPLICATION_NAME = os.getenv("PYROSCOPE_APPLICATION_NAME", "oncall") +PYROSCOPE_SERVER_ADDRESS = os.getenv("PYROSCOPE_SERVER_ADDRESS", "http://pyroscope:4040") +PYROSCOPE_AUTH_TOKEN = os.getenv("PYROSCOPE_AUTH_TOKEN", "") +PYROSCOPE_CELERY_WORKER_QUEUE = os.getenv("CELERY_WORKER_QUEUE", None) diff --git a/grafana-plugin/integration-tests/schedules/quality.test.ts b/grafana-plugin/integration-tests/schedules/quality.test.ts new file mode 100644 index 00000000..952e22d8 --- /dev/null +++ b/grafana-plugin/integration-tests/schedules/quality.test.ts @@ -0,0 +1,19 @@ +import { test, expect } from '@playwright/test'; +import { configureOnCallPlugin } from '../utils/configurePlugin'; +import { generateRandomValue } from '../utils/forms'; +import { createOnCallSchedule } from '../utils/schedule'; + +test.beforeEach(async ({ page }) => { + await 
configureOnCallPlugin(page); +}); + +test('check schedule quality for simple 1-user schedule', async ({ page }) => { + const onCallScheduleName = generateRandomValue(); + await createOnCallSchedule(page, onCallScheduleName); + + await expect(page.locator('div[class*="ScheduleQuality"]')).toHaveText('Quality: Great'); + + await page.hover('div[class*="ScheduleQuality"]'); + await expect(page.locator('div[class*="ScheduleQualityDetails"] >> span[class*="Text"] >> nth=2 ')).toHaveText('Schedule has no gaps'); + await expect(page.locator('div[class*="ScheduleQualityDetails"] >> span[class*="Text"] >> nth=3 ')).toHaveText('Schedule is perfectly balanced'); +}); diff --git a/grafana-plugin/src/components/GForm/GForm.tsx b/grafana-plugin/src/components/GForm/GForm.tsx index a6d65392..5e2c404a 100644 --- a/grafana-plugin/src/components/GForm/GForm.tsx +++ b/grafana-plugin/src/components/GForm/GForm.tsx @@ -1,4 +1,4 @@ -import React, { useCallback } from 'react'; +import React from 'react'; import { Field, Form, Input, InputControl, Select, Switch, TextArea } from '@grafana/ui'; import { capitalCase } from 'change-case'; @@ -17,22 +17,57 @@ const nullNormalizer = (value: string) => { return value || null; }; -function renderFormControl(formItem: FormItem, register: any, control: any) { +function renderFormControl(formItem: FormItem, register: any, control: any, onChangeFn: () => void) { switch (formItem.type) { case FormItemType.Input: - return ; + return ; case FormItemType.TextArea: - return